+ echo Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node26.txt
Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node26.txt
+ export ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/26
+ ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/26
+ mkdir -p /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/26
+ DATA_PATH=/local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml
+ TOKENIZER_PATH=/data_4/models/Qwen/Qwen2.5-14B-Instruct/
+ CKPT_LOAD_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/
+ VIT_CKPT_LOAD_DIR=/
+ CKPT_SAVE_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//
+ rsync -avh /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743/
sending incremental file list
sent 71 bytes received 12 bytes 166.00 bytes/sec
total size is 23.84K speedup is 287.17
+ cd /local_disk/cognitron_vl/
+ rm -fr datasets
+ mkdir -p datasets
+ ln -s /data/data/ datasets/CV
+ ln -s /data/data/LLM datasets/LLM
+ ln -s /data/data/LMM datasets/LMM
+ source /local_disk/cognitron_vl//scripts/set_env_mg_npu.sh
++ source /usr/local/Ascend/driver/bin/setenv.bash
+++ DEP_INFO_FILE=/etc/ascend_install.info
+++ [[ -f /etc/ascend_install.info ]]
+++ .
/etc/ascend_install.info +++ DRV_LIB64_COMMON_LDPATH=/driver/lib64/common +++ DRV_LIB64_DRV_LDPATH=/driver/lib64/driver +++ DRV_LIB64_LDPATH=/driver/lib64 +++ export LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin +++ PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++ source /usr/local/Ascend/ascend-toolkit/set_env.sh +++ export LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ 
LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ export ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest +++ ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest ++++ arch +++ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ export 
LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: +++ export PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: +++ PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: +++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin +++ 
PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin +++ export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest +++ ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest +++ export ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp +++ ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp +++ export TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit +++ TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit +++ export ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest +++ ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest ++ export HCCL_CONNECT_TIMEOUT=7200 ++ HCCL_CONNECT_TIMEOUT=7200 ++ export HCCL_EXEC_TIMEOUT=7200 ++ HCCL_EXEC_TIMEOUT=7200 ++ export COMBINED_ENABLE=1 ++ COMBINED_ENABLE=1 ++ export MULTI_STREAM_MEMORY_REUSE=1 ++ MULTI_STREAM_MEMORY_REUSE=1 ++ export HCCL_RDMA_TC=160 ++ HCCL_RDMA_TC=160 ++ export HCCL_RDMA_SL=5 ++ HCCL_RDMA_SL=5 ++ export HCCL_INTRA_PCIE_ENABLE=0 ++ HCCL_INTRA_PCIE_ENABLE=0 ++ export HCCL_INTRA_ROCE_ENABLE=1 ++ HCCL_INTRA_ROCE_ENABLE=1 ++ export HCCL_RDMA_TIMEOUT=20 ++ HCCL_RDMA_TIMEOUT=20 ++ export INF_NAN_MODE_ENABLE=1 ++ INF_NAN_MODE_ENABLE=1 ++ export DISTRIBUTED_BACKEND=hccl ++ DISTRIBUTED_BACKEND=hccl ++ export ASCEND_LAUNCH_BLOCKING=0 ++ ASCEND_LAUNCH_BLOCKING=0 ++ export ASCEND_SLOG_PRINT_TO_STDOUT=0 ++ ASCEND_SLOG_PRINT_TO_STDOUT=0 ++ export ASCEND_GLOBAL_LOG_LEVEL=3 ++ ASCEND_GLOBAL_LOG_LEVEL=3 ++ export ASCEND_GLOBAL_EVENT_ENABLE=0 ++ ASCEND_GLOBAL_EVENT_ENABLE=0 ++ export TASK_QUEUE_ENABLE=1 ++ TASK_QUEUE_ENABLE=1 ++ export PTCOPY_ENABLE=1 ++ PTCOPY_ENABLE=1 ++ export COMBINED_ENABLE=1 ++ COMBINED_ENABLE=1 ++ export DYNAMIC_OP=ADD#MUL ++ DYNAMIC_OP=ADD#MUL ++ export HCCL_WHITELIST_DISABLE=1 ++ HCCL_WHITELIST_DISABLE=1 ++ export HCCL_CONNECT_TIMEOUT=7200 ++ HCCL_CONNECT_TIMEOUT=7200 ++ export HCCL_WHITELIST_DISABLE=1 ++ HCCL_WHITELIST_DISABLE=1 ++ export CUDA_DEVICE_MAX_CONNECTIONS=1 ++ CUDA_DEVICE_MAX_CONNECTIONS=1 ++ pip3 install --no-index --find-links=/data/software/ -r requirements_npu.txt Looking in links: /data/software/ Processing data/software/expecttest-0.2.1-py3-none-any.whl (from -r requirements_npu.txt (line 1)) Requirement already satisfied: peft in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 2)) (0.7.0) Processing data/software/XlsxWriter-3.2.0-py3-none-any.whl (from -r requirements_npu.txt (line 3)) Requirement already satisfied: termcolor in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 4)) (2.4.0) Requirement already satisfied: tabulate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 5)) (0.9.0) Processing data/software/tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 6)) Requirement already satisfied: matplotlib in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 7)) (3.7.5) Processing data/software/datasets-3.0.0-py3-none-any.whl (from -r requirements_npu.txt (line 8)) Requirement 
already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 9)) (0.7.0) Processing data/software/pybind11-2.13.6-py3-none-any.whl (from -r requirements_npu.txt (line 10)) Requirement already satisfied: tensorboardX in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 11)) (2.6.2.2) Processing data/software/pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 12)) Requirement already satisfied: transformers>=4.40.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 13)) (4.40.1) Requirement already satisfied: deepspeed>=0.14.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 14)) (0.14.5) Processing data/software/accelerate-0.34.2-py3-none-any.whl (from -r requirements_npu.txt (line 15)) Requirement already satisfied: timm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 16)) (0.9.16) Processing data/software/flask-3.0.3-py3-none-any.whl (from -r requirements_npu.txt (line 17)) Processing data/software/Flask_RESTful-0.3.10-py2.py3-none-any.whl (from -r requirements_npu.txt (line 18)) Processing data/software/decord-0.6.0-py3-none-manylinux2010_x86_64.whl (from -r requirements_npu.txt (line 19)) Processing data/software/natsort-8.4.0-py3-none-any.whl (from -r requirements_npu.txt (line 20)) Requirement already satisfied: numpy>=1.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (1.24.4) Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (23.2) Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.9.8) Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.4.1) Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (2.1.0+cpu) Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (4.66.2) Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.4.2) Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.20.3) Requirement already satisfied: regex>=2022.1.18 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2023.12.25) Requirement already satisfied: requests>=2.26.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2.31.0) Requirement already satisfied: contourpy>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.1.1) Requirement already satisfied: cycler>=0.10 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (0.12.1) Requirement already satisfied: fonttools>=4.22.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r 
requirements_npu.txt (line 7)) (4.49.0) Requirement already satisfied: kiwisolver>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.4.5) Requirement already satisfied: pillow>=6.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (10.2.0) Requirement already satisfied: pyparsing>=2.3.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (3.1.1) Requirement already satisfied: python-dateutil>=2.7 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (2.8.2) Requirement already satisfied: importlib-resources>=3.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (6.1.2) Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.13.1) Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.3.7) Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2.0.3) Processing data/software/requests-2.32.3-py3-none-any.whl (from tiktoken->-r requirements_npu.txt (line 6)) Processing data/software/tqdm-4.67.1-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.4.1) Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.70.15) Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2023.10.0) Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.9.3) Processing data/software/huggingface_hub-0.26.2-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) Requirement already satisfied: protobuf>=3.20 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tensorboardX->-r requirements_npu.txt (line 11)) (4.25.3) Requirement already satisfied: tokenizers<0.20,>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers>=4.40.1->-r requirements_npu.txt (line 13)) (0.19.1) Requirement already satisfied: hjson in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (3.1.0) Requirement already satisfied: ninja in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.11.1.1) Requirement already satisfied: nvidia-ml-py in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (12.560.30) Requirement already satisfied: py-cpuinfo in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (9.0.0) Requirement already satisfied: pydantic in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from 
deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.10.15) Processing data/software/safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from peft->-r requirements_npu.txt (line 2)) Requirement already satisfied: torchvision in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from timm->-r requirements_npu.txt (line 16)) (0.16.0) Requirement already satisfied: Werkzeug>=3.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.0.1) Requirement already satisfied: Jinja2>=3.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.1.3) Processing data/software/itsdangerous-2.2.0-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) Requirement already satisfied: click>=8.1.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (8.1.7) Processing data/software/blinker-1.8.2-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) Requirement already satisfied: importlib-metadata>=3.6.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (7.0.1) Processing data/software/aniso8601-9.0.1-py2.py3-none-any.whl (from flask_restful->-r requirements_npu.txt (line 18)) Requirement already satisfied: six>=1.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (1.16.0) Requirement already satisfied: pytz in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (2024.1) Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.3.1) Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (23.2.0) Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.4.1) Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (6.0.5) Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.9.4) Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (4.0.3) Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft->-r requirements_npu.txt (line 2)) (4.10.0) Requirement already satisfied: zipp>=0.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from importlib-metadata>=3.6.0->flask->-r requirements_npu.txt (line 17)) (3.17.0) Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from Jinja2>=3.1.2->flask->-r requirements_npu.txt (line 17)) (2.1.5) Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.3.2) Requirement already satisfied: idna<4,>=2.5 
in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.6) Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (1.26.18) Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (2024.2.2) Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.4) Requirement already satisfied: networkx in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (3.1) Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2024.1) Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.3.0) DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 Installing collected packages: aniso8601, xlsxwriter, tqdm, safetensors, requests, pybind11, pyarrow, natsort, itsdangerous, expecttest, decord, blinker, tiktoken, huggingface-hub, flask, flask_restful, accelerate, datasets Attempting uninstall: tqdm Found existing installation: tqdm 4.66.2 Uninstalling tqdm-4.66.2: Successfully uninstalled tqdm-4.66.2 Attempting uninstall: safetensors Found existing installation: safetensors 0.4.2 Uninstalling safetensors-0.4.2: Successfully uninstalled safetensors-0.4.2 Attempting uninstall: requests Found existing installation: requests 2.31.0 Uninstalling requests-2.31.0: Successfully uninstalled requests-2.31.0 Attempting uninstall: pyarrow Found existing installation: pyarrow 15.0.0 Uninstalling pyarrow-15.0.0: Successfully uninstalled pyarrow-15.0.0 Attempting uninstall: huggingface-hub Found existing installation: huggingface-hub 0.20.3 Uninstalling huggingface-hub-0.20.3: Successfully uninstalled huggingface-hub-0.20.3 Attempting uninstall: accelerate Found existing installation: accelerate 0.25.0 Uninstalling accelerate-0.25.0: Successfully uninstalled accelerate-0.25.0 Attempting uninstall: datasets Found existing installation: datasets 2.16.0 Uninstalling datasets-2.16.0: Successfully uninstalled datasets-2.16.0 ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. tikit 1.8.2.240926 requires dicttoxml==1.7.4, which is not installed. tikit 1.8.2.240926 requires docopt==0.6.2, which is not installed. tikit 1.8.2.240926 requires future==0.18.2, which is not installed. tikit 1.8.2.240926 requires hdfs==2.6.0, which is not installed. tikit 1.8.2.240926 requires pure-sasl==0.6.2, which is not installed. tikit 1.8.2.240926 requires py4j==0.10.7, which is not installed. tikit 1.8.2.240926 requires PyHive[hive]==0.6.4, which is not installed. tikit 1.8.2.240926 requires pyjwt>=2.4.0, which is not installed. 
tikit 1.8.2.240926 requires requests-kerberos>=0.14.0, which is not installed. tikit 1.8.2.240926 requires sasl==0.3.1, which is not installed. tikit 1.8.2.240926 requires thrift==0.15.0, which is not installed. tikit 1.8.2.240926 requires thrift-sasl>=0.1.0, which is not installed. tikit 1.8.2.240926 requires certifi==2021.10.8, but you have certifi 2024.2.2 which is incompatible. tikit 1.8.2.240926 requires cos-python-sdk-v5==1.9.29, but you have cos-python-sdk-v5 1.9.26 which is incompatible. tikit 1.8.2.240926 requires idna==3.3, but you have idna 3.6 which is incompatible. tikit 1.8.2.240926 requires prettytable==2.5.0, but you have prettytable 3.11.0 which is incompatible. tikit 1.8.2.240926 requires urllib3==1.26.7, but you have urllib3 1.26.18 which is incompatible. tikit 1.8.2.240926 requires wcwidth==0.2.5, but you have wcwidth 0.2.13 which is incompatible. Successfully installed accelerate-0.34.2 aniso8601-9.0.1 blinker-1.8.2 datasets-3.0.0 decord-0.6.0 expecttest-0.2.1 flask-3.0.3 flask_restful-0.3.10 huggingface-hub-0.26.2 itsdangerous-2.2.0 natsort-8.4.0 pyarrow-17.0.0 pybind11-2.13.6 requests-2.32.3 safetensors-0.4.5 tiktoken-0.7.0 tqdm-4.67.1 xlsxwriter-3.2.0 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ return 0 + MEGATRON_DIR=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ + MINDSPEED_DIR=/local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ + MODELLINK_DIR=/local_disk/cognitron_vl//third_party/ModelLink/ + pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ Looking in links: /data/software/ Obtaining file://local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0 Installing build dependencies: started Installing build dependencies: finished with status 'done' Checking if build backend supports build_editable: started Checking if build backend supports build_editable: finished with status 'done' Getting requirements to build editable: started Getting requirements to build editable: finished with status 'done' Installing backend dependencies: started Installing backend dependencies: finished with status 'done' Preparing editable metadata (pyproject.toml): started Preparing editable metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: megatron_core Building editable for megatron_core (pyproject.toml): started Building editable for megatron_core (pyproject.toml): finished with status 'done' Created wheel for megatron_core: filename=megatron_core-0.6.0-0.editable-cp38-cp38-linux_x86_64.whl size=8791 sha256=1c8a73544a768ff0759eb2db03ef8e548406a6700abe057332d8072922777a16 Stored in directory: /tmp/pip-ephem-wheel-cache-gr9qxvo_/wheels/54/9c/d1/d2015aa0c34e791e64d65d19395e5a9a5528f0c63fd519b9ff Successfully built megatron_core DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. 
Discussion can be found at https://github.com/pypa/pip/issues/12063 Installing collected packages: megatron_core Successfully installed megatron_core-0.6.0 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv + pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ Looking in links: /data/software/ Obtaining file://local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0 Preparing metadata (setup.py): started Preparing metadata (setup.py): finished with status 'done' WARNING: Error parsing requirements for tokenizers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/tokenizers-0.19.1.dist-info/METADATA' WARNING: Error parsing requirements for transformers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/transformers-4.40.1.dist-info/METADATA' DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 Installing collected packages: mindspeed Running setup.py develop for mindspeed Successfully installed mindspeed-0.6.0 WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv + pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/ModelLink/ Looking in links: /data/software/ Obtaining file://local_disk/cognitron_vl/third_party/ModelLink Preparing metadata (setup.py): started Preparing metadata (setup.py): finished with status 'done' Requirement already satisfied: numpy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.24.4) Processing data/software/transformers-4.43.2-py3-none-any.whl (from modellink==0.0.1) Processing data/software/transformers-stream-generator-0.0.5.tar.gz (from modellink==0.0.1) Preparing metadata (setup.py): started Preparing metadata (setup.py): finished with status 'done' Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.4) Requirement already satisfied: decorator in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (5.1.1) Requirement already satisfied: scipy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.10.1) Requirement already satisfied: sentencepiece in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.2.0) Requirement already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) Requirement already satisfied: datasets in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (3.0.0) Requirement already satisfied: pybind11 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (2.13.6) Requirement already satisfied: accelerate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.34.2) Requirement already satisfied: six in 
/root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.16.0) Requirement already satisfied: protobuf in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (4.25.3) Processing data/software/peft-0.7.1-py3-none-any.whl (from modellink==0.0.1) Requirement already satisfied: tiktoken in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (23.2) Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.9.8) Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.4.1) Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (2.1.0+cpu) Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (4.67.1) Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.4.5) Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.26.2) Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (3.13.1) Requirement already satisfied: regex!=2019.12.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2023.12.25) Requirement already satisfied: requests in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2.32.3) Processing data/software/tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from transformers==4.43.2->modellink==0.0.1) Requirement already satisfied: pyarrow>=15.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (17.0.0) Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.3.7) Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (2.0.3) Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.4.1) Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.70.15) Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets->modellink==0.0.1) (2023.10.0) Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.9.3) Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->modellink==0.0.1) (1.3.0) Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.3.1) Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from 
aiohttp->datasets->modellink==0.0.1) (23.2.0) Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.4.1) Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (6.0.5) Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.9.4) Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (4.0.3) Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft==0.7.1->modellink==0.0.1) (4.10.0) Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.3.2) Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.6) Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (1.26.18) Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (2024.2.2) Requirement already satisfied: networkx in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1) Requirement already satisfied: jinja2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1.3) Requirement already satisfied: python-dateutil>=2.8.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2.8.2) Requirement already satisfied: pytz>=2020.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from jinja2->torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (2.1.5) Building wheels for collected packages: transformers_stream_generator Building wheel for transformers_stream_generator (setup.py): started Building wheel for transformers_stream_generator (setup.py): finished with status 'done' Created wheel for transformers_stream_generator: filename=transformers_stream_generator-0.0.5-py3-none-any.whl size=12425 sha256=53a0efa1548230be4832bd2d5f76d2b932ac2ffee1961d12082c62ce27bcc265 Stored in directory: /root/.cache/pip/wheels/56/8c/42/5381d9c36bc85f28982f4cf8f98dc44d37a6d6c04897a5cb7c Successfully built transformers_stream_generator DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. 
Discussion can be found at https://github.com/pypa/pip/issues/12063
Installing collected packages: tokenizers, transformers, transformers_stream_generator, peft, modellink
Attempting uninstall: tokenizers
Found existing installation: tokenizers 0.20.3
Uninstalling tokenizers-0.20.3:
Successfully uninstalled tokenizers-0.20.3
Attempting uninstall: transformers
Found existing installation: transformers 4.46.3
Uninstalling transformers-4.46.3:
Successfully uninstalled transformers-4.46.3
Attempting uninstall: peft
Found existing installation: peft 0.7.0
Uninstalling peft-0.7.0:
Successfully uninstalled peft-0.7.0
Running setup.py develop for modellink
Successfully installed modellink-0.0.1 peft-0.7.1 tokenizers-0.19.1 transformers-4.43.2 transformers_stream_generator-0.0.5
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
+ export PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:
+ PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:
+ GPUS_PER_NODE=16
+ NNODES=32
+ NODE_RANK=26
+ MASTER_PORT=34567
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
+ CUDA_DEVICE_MAX_CONNECTIONS=1
+ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
+ PYTORCH_NPU_ALLOC_CONF=expandable_segments:True
+ VISION_SEQ_LENGTH=1025
+ IMAGE_TOKEN_LENGTH=256
+ IMAGE_SIZE=448
+ VISION_MODEL_TYPE=intern_300m
+ TP=8
+ PP=1
+ CP=8
+ CP_ALGO=megatron_cp_algo
+ CP_MASK=causal
+ DISTRIBUTED_ARGS=' --nproc_per_node 16 --nnodes 32 --node_rank 26 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567 '
+ GPT_ARGS=' --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint '
+ DATA_ARGS=' --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 '
+ CKPT_ARGS=' --load /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// '
+ OUTPUT_ARGS=' --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 '
+ torchrun --nproc_per_node 16 --nnodes 32 --node_rank 26 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567 /local_disk/cognitron_vl//lcvlm_modellink/pretrain_lcvlm.py --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 --load /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 --distributed-backend nccl
[2024-11-28 15:50:33,759] torch.distributed.run: [WARNING]
[2024-11-28 15:50:33,759] torch.distributed.run: [WARNING] *****************************************
[2024-11-28 15:50:33,759] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
[2024-11-28 15:50:33,759] torch.distributed.run: [WARNING] *****************************************
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Creating extension directory /root/.cache/torch_extensions/py38_cpu/adaptive_cp...
Emitting ninja build file /root/.cache/torch_extensions/py38_cpu/adaptive_cp/build.ninja...
Building extension module adaptive_cp...
Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...
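For readers reconstructing the launcher, the command above follows the usual Megatron-style pattern: each argument group (DISTRIBUTED_ARGS, GPT_ARGS, DATA_ARGS, CKPT_ARGS, OUTPUT_ARGS) is assembled into a shell variable and expanded into a single torchrun invocation. The sketch below illustrates that pattern using the values visible in this trace; the surrounding script structure is an assumption, and NODE_RANK plus the master address would normally be injected per node by the job scheduler.

#!/bin/bash
# Sketch of the launch pattern traced above; values taken from this log, script structure assumed.
GPUS_PER_NODE=16
NNODES=32
NODE_RANK=${NODE_RANK:-26}        # per-node rank, injected by the scheduler in the real job
MASTER_ADDR=${MASTER_ADDR:-train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local}
MASTER_PORT=34567

DISTRIBUTED_ARGS="
    --nproc_per_node $GPUS_PER_NODE
    --nnodes $NNODES
    --node_rank $NODE_RANK
    --master_addr $MASTER_ADDR
    --master_port $MASTER_PORT
"

# GPT_ARGS, DATA_ARGS, CKPT_ARGS and OUTPUT_ARGS are built the same way;
# their expanded contents are shown in the torchrun command above.
torchrun $DISTRIBUTED_ARGS /local_disk/cognitron_vl//lcvlm_modellink/pretrain_lcvlm.py \
    $GPT_ARGS $DATA_ARGS $CKPT_ARGS $OUTPUT_ARGS \
    --distributed-backend nccl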
[1/2] c++ -MMD -MF adaptive_cp.o.d -DTORCH_EXTENSION_NAME=adaptive_cp -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -I/usr/local/Ascend/ascend-toolkit/latest/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/third_party -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/acl -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/inc -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/TH -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/THC -isystem /root/miniconda3/envs/py38/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++17 -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -fPIC -pie -Wl,--disable-new-dtags,--rpath -s -O2 -c local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/ops/csrc/algorithm/adaptive_cp/adaptive_cp.cpp -o adaptive_cp.o [2/2] c++ adaptive_cp.o -shared -L/usr/local/Ascend/ascend-toolkit/latest/lib64 -lascendcl -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/lib -ltorch_npu -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/lib -lc10 -ltorch_cpu -ltorch -ltorch_python -o adaptive_cp.so Loading extension module adaptive_cp... Loading extension module adaptive_cp... Loading extension module adaptive_cp... Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... Loading extension module adaptive_cp... Loading extension module adaptive_cp... 
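The [1/2] and [2/2] steps above are MindSpeed's adaptive_cp operator being JIT-compiled through torch.utils.cpp_extension and cached under /root/.cache/torch_extensions/py38_cpu; one rank builds it and the remaining ranks load the cached module. If the build location or parallelism needs to be controlled, PyTorch honors the TORCH_EXTENSIONS_DIR and MAX_JOBS environment variables (MAX_JOBS is the knob the ninja message above refers to). A small sketch follows; the relocated cache path is an illustrative example, not taken from this log.

# Optional pre-launch tuning of the JIT extension build (sketch).
export MAX_JOBS=16                                       # parallel ninja jobs for the C++ build
export TORCH_EXTENSIONS_DIR=/local_disk/torch_ext_cache  # example path: move the cache off /root
# Force a clean rebuild of adaptive_cp by removing the cached build directory seen in this log:
rm -rf /root/.cache/torch_extensions/py38_cpu/adaptive_cp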
local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") Loading extension module adaptive_cp... local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 warnings.warn("failed to generate the npu_matmul_add_fp32") /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? 
warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? 
warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( /root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? warn( > compiling dataset index builder ... make: Entering directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets' make: Nothing to be done for 'default'. 
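For reference, the [1/2]/[2/2] steps above are PyTorch's ninja-based JIT extension build. A minimal sketch of how an op like this is typically compiled and loaded at startup, assuming a pybind11 source file such as adaptive_cp.cpp; the Python-side call, paths and flags here are illustrative rather than the exact MindSpeed code.

from torch.utils.cpp_extension import load

# Build the shared object the first time it is needed and reuse the cached build
# afterwards; with verbose=True this prints the "[1/2] c++ ..." / "[2/2] c++ ..."
# commands and "Loading extension module adaptive_cp..." (once per process).
adaptive_cp = load(
    name="adaptive_cp",  # becomes -DTORCH_EXTENSION_NAME=adaptive_cp
    sources=["mindspeed/ops/csrc/algorithm/adaptive_cp/adaptive_cp.cpp"],
    extra_cflags=["-O2", "-std=c++17"],
    extra_ldflags=["-L/usr/local/Ascend/ascend-toolkit/latest/lib64", "-lascendcl"],
    verbose=True,
)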
> compiling dataset index builder ... make: Entering directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets' make: Nothing to be done for 'default'. make: Leaving directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets' >>> done with dataset index builder. Compilation time: 0.648 seconds
vision_projector_recompute False
vision_model_freeze
=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. => set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. => set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. vision_model_freeze=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
vision_model_freeze
=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
For each ViT decoder layer i (layers 0 through 21 appear in this part of the log, with each tensor-parallel rank printing its own copy of the same messages), the identical parameter set is frozen:
=> set param external_feature_model.vit.decoder.layers.<i>.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<i>.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
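For reference, the freeze messages above correspond to the standard PyTorch pattern of clearing requires_grad on every parameter of the vision tower before training. The sketch below is a minimal illustration, not the actual ModelLink/Megatron code path (which is not shown in this log); the module handle and the freeze_module helper name are assumptions, and the print format simply mirrors the log lines.

import torch.nn as nn

def freeze_module(module: nn.Module, prefix: str = "external_feature_model.vit") -> None:
    """Freeze every parameter of `module`, logging in the same format as above.

    Sketch only: the real training code that emits these lines is not shown in
    this log; `prefix` merely reproduces the parameter names seen above.
    """
    for name, param in module.named_parameters():
        param.requires_grad = False  # exclude this parameter from gradient computation
        print(f"=> set param {prefix}.{name} {param.shape} requires grad to False.")

# Hypothetical usage: freeze the vision encoder while the language model stays trainable.
#   freeze_module(model.external_feature_model.vit)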
model GPTVLModel(
  (external_feature_model): MegatronVisionModel(
    (vit): InternViTModel(
      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
      (position_embeddings): Embedding(1025, 1024)
      (decoder): TransformerBlock(
        (layers): ModuleList(
          (0-23): 24 x InternViTTransformerLayer(
            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (self_attention): SelfAttention(
              (core_attention): DotProductAttention(
                (scale_mask_softmax): FusedScaleMaskSoftmax()
                (attention_dropout): Dropout(p=0.0, inplace=False)
              )
              (linear_proj): RowParallelLinear()
              (linear_qkv): ColumnParallelLinear()
            )
            (self_attn_bda): IdentityFuncOp()
            (pre_cross_attn_layernorm): IdentityOp()
            (cross_attention): IdentityOp()
            (cross_attn_bda): IdentityFuncOp()
            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (mlp): MLP(
              (linear_fc1): ColumnParallelLinear()
              (linear_fc2): RowParallelLinear()
            )
            (mlp_bda): IdentityFuncOp()
          )
        )
      )
    )
    (vision_projection): MultimodalProjector(
      (encoder): MLP(
        (linear_fc1): ColumnParallelLinear()
        (linear_fc2): RowParallelLinear()
      )
    )
    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
  )
  (embedding): LanguageModelEmbedding(
    (word_embeddings): VocabParallelEmbedding()
    (embedding_dropout): Dropout(p=0.0, inplace=False)
  )
  (rotary_pos_emb): RotaryEmbedding()
  (decoder): TransformerBlock(
    (layers): ModuleList(
      (0-47): 48 x TransformerLayer(
        (input_layernorm): RMSNorm()
        (self_attention): SelfAttention(
          (core_attention): DotProductAttention(
            (scale_mask_softmax): FusedScaleMaskSoftmax()
            (attention_dropout): Dropout(p=0.0, inplace=False)
          )
          (linear_proj): RowParallelLinear()
          (linear_qkv): ColumnParallelLinear()
          (q_layernorm): IdentityOp()
          (k_layernorm): IdentityOp()
        )
        (pre_cross_attn_layernorm): IdentityOp()
        (cross_attention): IdentityOp()
        (cross_attn_bda): IdentityFuncOp()
        (pre_mlp_layernorm): RMSNorm()
        (mlp): MLP(
          (linear_fc1): ColumnParallelLinear()
          (linear_fc2): RowParallelLinear()
        )
      )
    )
    (final_layernorm): RMSNorm()
  )
  (output_layer): ColumnParallelLinear()
)
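Given the module tree above, a quick sanity check is to count frozen vs. trainable parameters per submodule and confirm that only the InternViT branch is frozen for this stage. The sketch below assumes `model` is the GPTVLModel instance printed above; summarize_trainable is an illustrative helper, not part of ModelLink, and the counts it reports are the local per-rank shard sizes, not the full model size.

from collections import defaultdict
import torch.nn as nn

def summarize_trainable(model: nn.Module) -> None:
    """Print trainable vs. frozen parameter counts, grouped by submodule prefix."""
    stats = defaultdict(lambda: [0, 0])  # prefix -> [trainable, frozen]
    for name, param in model.named_parameters():
        prefix = ".".join(name.split(".")[:2])  # e.g. "external_feature_model.vit"
        stats[prefix][0 if param.requires_grad else 1] += param.numel()
    for prefix, (trainable, frozen) in sorted(stats.items()):
        print(f"{prefix:40s} trainable={trainable:>14,} frozen={frozen:>14,}")

# For this run the log only shows external_feature_model.vit.* being frozen;
# whether the projector or pre_proj_layernorm is trained is not visible here.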
=> set param ... requires grad to False.
   (The remaining interleaved per-rank copies of the same freeze messages, covering the
    parameters of ViT decoder layers 0-23 summarized above, are omitted; every ViT
    parameter ends up with requires_grad=False.)
=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. vision_model_freeze=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
vision_model_freeze
=> ViT parameters frozen (requires grad set to False). Every rank prints the same freeze listing, and the per-layer pattern is identical for each decoder layer external_feature_model.vit.decoder.layers.<N> logged in this section (N = 0-3, 5-23):
=> set param external_feature_model.vit.decoder.layers.<N>.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.<N>.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> Non-layer ViT parameters frozen in the same pass:
=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
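The shapes in these freeze messages are per-rank tensor-parallel shards, not full weights: the run uses tensor parallelism of degree 8 (the script name carries tp8), so the fused QKV projection of the 1024-wide InternViT appears as [384, 1024] (3 x 1024 / 8 output rows per rank), the attention output projection as [1024, 128] (1024 / 8 input columns per rank), and the MLP as [512, 1024] / [1024, 512] (a 4096-wide hidden split eight ways). A minimal sketch of that arithmetic, assuming hidden size 1024 and MLP width 4096; illustrative only, not the ModelLink sharding code:

# Illustrative arithmetic only: reproduce the per-rank shard shapes printed in the
# freeze log, assuming InternViT hidden_size=1024, ffn_hidden_size=4096, and tp=8.
hidden_size = 1024
ffn_hidden_size = 4096
tp = 8

# ColumnParallelLinear splits its output dimension across tp ranks;
# RowParallelLinear splits its input dimension.
linear_qkv = (3 * hidden_size // tp, hidden_size)   # (384, 1024)
linear_proj = (hidden_size, hidden_size // tp)      # (1024, 128)
linear_fc1 = (ffn_hidden_size // tp, hidden_size)   # (512, 1024)
linear_fc2 = (hidden_size, ffn_hidden_size // tp)   # (1024, 512)

print(linear_qkv, linear_proj, linear_fc1, linear_fc2)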
model GPTVLModel(
  (external_feature_model): MegatronVisionModel(
    (vit): InternViTModel(
      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
      (position_embeddings): Embedding(1025, 1024)
      (decoder): TransformerBlock(
        (layers): ModuleList(
          (0-23): 24 x InternViTTransformerLayer(
            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (self_attention): SelfAttention(
              (core_attention): DotProductAttention(
                (scale_mask_softmax): FusedScaleMaskSoftmax()
                (attention_dropout): Dropout(p=0.0, inplace=False)
              )
              (linear_proj): RowParallelLinear()
              (linear_qkv): ColumnParallelLinear()
            )
            (self_attn_bda): IdentityFuncOp()
            (pre_cross_attn_layernorm): IdentityOp()
            (cross_attention): IdentityOp()
            (cross_attn_bda): IdentityFuncOp()
            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (mlp): MLP(
              (linear_fc1): ColumnParallelLinear()
              (linear_fc2): RowParallelLinear()
            )
            (mlp_bda): IdentityFuncOp()
          )
        )
      )
    )
    (vision_projection): MultimodalProjector(
      (encoder): MLP(
        (linear_fc1): ColumnParallelLinear()
        (linear_fc2): RowParallelLinear()
      )
    )
    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
  )
  (embedding): LanguageModelEmbedding(
    (word_embeddings): VocabParallelEmbedding()
    (embedding_dropout): Dropout(p=0.0, inplace=False)
  )
  (rotary_pos_emb): RotaryEmbedding()
  (decoder): TransformerBlock(
    (layers): ModuleList(
      (0-47): 48 x TransformerLayer(
        (input_layernorm): RMSNorm()
        (self_attention): SelfAttention(
          (core_attention): DotProductAttention(
            (scale_mask_softmax): FusedScaleMaskSoftmax()
            (attention_dropout): Dropout(p=0.0, inplace=False)
          )
          (linear_proj): RowParallelLinear()
          (linear_qkv): ColumnParallelLinear()
          (q_layernorm): IdentityOp()
          (k_layernorm): IdentityOp()
        )
        (pre_cross_attn_layernorm): IdentityOp()
        (cross_attention): IdentityOp()
        (cross_attn_bda): IdentityFuncOp()
        (pre_mlp_layernorm): RMSNorm()
        (mlp): MLP(
          (linear_fc1): ColumnParallelLinear()
          (linear_fc2): RowParallelLinear()
        )
      )
    )
    (final_layernorm): RMSNorm()
  )
  (output_layer): ColumnParallelLinear()
)
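The printout shows the stage-4 topology: a 24-layer InternViT encoder (14x14 conv patch embedding, 1025 position embeddings including the class token), a MultimodalProjector MLP, a 4096-wide pre_proj_layernorm, and a 48-layer Qwen2.5-style decoder with RMSNorm and rotary embeddings. A rough sketch of the data flow this implies is below; the stand-in modules, the 2x2 patch merge, the GELU projector, and the 5120 language hidden size are assumptions for illustration, not the actual CognitronVL/ModelLink implementation.

# Rough, illustrative sketch of the forward path implied by the printout above.
# Only the 1024-dim ViT width and the 4096-dim pre_proj_layernorm come from the log;
# everything else here is an assumption.
import torch
import torch.nn as nn

vit_hidden = 1024          # InternViT hidden size (from the log)
merged_vision_dim = 4096   # width of pre_proj_layernorm (from the log)
llm_hidden = 5120          # assumed Qwen2.5-14B hidden size

vit = nn.Identity()                           # stands in for the frozen InternViTModel
pre_proj_layernorm = nn.LayerNorm(merged_vision_dim)
vision_projection = nn.Sequential(            # stands in for the MultimodalProjector MLP
    nn.Linear(merged_vision_dim, llm_hidden),
    nn.GELU(),
    nn.Linear(llm_hidden, llm_hidden),
)

def encode_image(patch_features: torch.Tensor) -> torch.Tensor:
    """patch_features: [batch, 1024 patches + 1 class token, 1024]."""
    feats = vit(patch_features)                 # frozen ViT features
    feats = feats[:, 1:, :]                     # drop the class token (assumed)
    b, n, c = feats.shape
    feats = feats.reshape(b, n // 4, 4 * c)     # 2x2 merge -> 4096-dim tokens (assumed)
    feats = pre_proj_layernorm(feats)
    return vision_projection(feats)             # visual tokens fed to the LLM decoder

image_tokens = encode_image(torch.randn(1, 1025, vit_hidden))
print(image_tokens.shape)  # torch.Size([1, 256, 5120])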
vision_model_freeze
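The vision_model_freeze marker and the requires-grad messages above correspond to a freeze pass over the ViT parameters before training starts. A minimal sketch of such a pass, using a hypothetical freeze_vision_model helper (the real ModelLink/CognitronVL code may differ):

import torch.nn as nn

def freeze_vision_model(model: nn.Module, prefix: str = "external_feature_model.vit.") -> None:
    """Turn off gradients for every parameter under the vision-tower prefix and
    log it in the same style as this run. Assumed helper, not the actual code."""
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")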
model GPTVLModel(
  (external_feature_model): MegatronVisionModel(
    (vit): InternViTModel(
      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
      (position_embeddings): Embedding(1025, 1024)
      (decoder): TransformerBlock(
        (layers): ModuleList(
          (0-23): 24 x InternViTTransformerLayer(
            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (self_attention): SelfAttention(
              (core_attention): DotProductAttention(
                (scale_mask_softmax): FusedScaleMaskSoftmax()
                (attention_dropout): Dropout(p=0.0, inplace=False)
              )
              (linear_proj): RowParallelLinear()
              (linear_qkv): ColumnParallelLinear()
            )
            (self_attn_bda): IdentityFuncOp()
            (pre_cross_attn_layernorm): IdentityOp()
            (cross_attention): IdentityOp()
            (cross_attn_bda): IdentityFuncOp()
            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (mlp): MLP(
              (linear_fc1): ColumnParallelLinear()
              (linear_fc2): RowParallelLinear()
            )
            (mlp_bda): IdentityFuncOp()
          )
        )
      )
    )
    (vision_projection): MultimodalProjector(
      (encoder): MLP(
        (linear_fc1): ColumnParallelLinear()
        (linear_fc2): RowParallelLinear()
      )
    )
    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
  )
  (embedding): LanguageModelEmbedding(
    (word_embeddings): VocabParallelEmbedding()
    (embedding_dropout): Dropout(p=0.0, inplace=False)
  )
  (rotary_pos_emb): RotaryEmbedding()
  (decoder): TransformerBlock(
    (layers): ModuleList(
      (0-47): 48 x TransformerLayer(
        (input_layernorm): RMSNorm()
        (self_attention): SelfAttention(
          (core_attention): DotProductAttention(
            (scale_mask_softmax): FusedScaleMaskSoftmax()
            (attention_dropout): Dropout(p=0.0, inplace=False)
          )
          (linear_proj): RowParallelLinear()
          (linear_qkv): ColumnParallelLinear()
          (q_layernorm): IdentityOp()
          (k_layernorm): IdentityOp()
        )
        (pre_cross_attn_layernorm): IdentityOp()
        (cross_attention): IdentityOp()
        (cross_attn_bda): IdentityFuncOp()
        (pre_mlp_layernorm): RMSNorm()
        (mlp): MLP(
          (linear_fc1): ColumnParallelLinear()
          (linear_fc2): RowParallelLinear()
        )
      )
    )
    (final_layernorm): RMSNorm()
  )
  (output_layer): ColumnParallelLinear()
)
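For orientation, a minimal single-device sketch of how the printed modules compose: a frozen InternViT encoder feeding pre_proj_layernorm and the MLP projector, whose outputs are merged into the 48-layer decoder's input sequence. Everything beyond the names shown in the printout (the embed_tokens accessor, the inputs_embeds call, the 4-token merge that explains the 4096-wide pre_proj_layernorm, the 5120 language-model hidden size) is an assumption for illustration, not the ModelLink/Megatron implementation.

import torch
import torch.nn as nn

class GPTVLSketch(nn.Module):
    """Hypothetical stand-in mirroring the printed GPTVLModel layout."""

    def __init__(self, vit: nn.Module, language_model: nn.Module,
                 proj_in: int = 4096, lm_hidden: int = 5120):
        super().__init__()
        self.vit = vit                                      # InternViT-style encoder, kept frozen
        self.pre_proj_layernorm = nn.LayerNorm(proj_in)     # LayerNorm((4096,)) in the printout
        self.vision_projection = nn.Sequential(             # MultimodalProjector.encoder (2-layer MLP)
            nn.Linear(proj_in, lm_hidden), nn.GELU(), nn.Linear(lm_hidden, lm_hidden))
        self.language_model = language_model                # 48-layer decoder (Qwen2.5-14B here)

    def forward(self, images, input_ids, image_token_mask):
        # ViT is frozen, matching the requires_grad=False messages in the log
        with torch.no_grad():
            feats = self.vit(images)                        # [b, n_patches, 1024]
        b, n, c = feats.shape
        # assumption: merge 4 neighbouring patch tokens so 1024 -> 4096 channels
        feats = feats.reshape(b, n // 4, 4 * c)
        image_embeds = self.vision_projection(self.pre_proj_layernorm(feats))
        # scatter projected image tokens into the text embedding sequence
        text_embeds = self.language_model.embed_tokens(input_ids)      # hypothetical accessor
        text_embeds = text_embeds.masked_scatter(
            image_token_mask.unsqueeze(-1), image_embeds.reshape(-1, image_embeds.size(-1)))
        return self.language_model(inputs_embeds=text_embeds)          # hypothetical HF-style call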
vision_model_freeze
=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
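The per-parameter messages above are what the vision_model_freeze step emits. A minimal sketch of a loop that would produce this logging, assuming a plain PyTorch module and a hypothetical helper name (the real freeze logic lives inside the training framework):

import torch

def freeze_vision_tower(model: torch.nn.Module,
                        prefix: str = "external_feature_model.vit") -> int:
    # Walk the named parameters, disable gradients on everything under the
    # vision tower, and echo each one in the same format as the log lines above.
    frozen = 0
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            frozen += param.numel()
            print(f"=> set param {name} {param.shape} requires grad to False.")
    return frozen  # total number of frozen elements, handy for a sanity check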
=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.vision_model_freeze => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. 
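What the "vision_model_freeze" lines above record is a plain requires_grad switch over the ViT submodule: every parameter under external_feature_model.vit is excluded from the optimizer, while the projector and the language decoder stay trainable. A minimal sketch of that idea in plain PyTorch follows; the helper name freeze_vision_model and the exact logging format are assumptions for illustration, not the project's actual code.

import torch
from torch import nn

def freeze_vision_model(model: nn.Module, prefix: str = "external_feature_model.vit") -> None:
    # Walk all parameters and turn off gradients for the vision tower only.
    # The print format mirrors the "=> set param ... requires grad to False." log lines.
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")

# Usage sketch (constructor is hypothetical): build the model, then freeze the ViT
# before the optimizer is created, so frozen weights never receive gradient state.
# model = build_gpt_vl_model(...)
# freeze_vision_model(model)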
=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
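The run above (together with the interleaved per-rank copies earlier) records the vision-model freeze step: requires_grad is switched off for every InternViT parameter, so the vision tower stays fixed in this stage. A minimal PyTorch sketch of that pattern follows; freeze_module and its prefix argument are illustrative names, not the project's actual helper.

from torch import nn

def freeze_module(module: nn.Module, prefix: str = "") -> None:
    # Illustrative helper (not the project's actual function): disable gradients
    # for every parameter of `module` and log each one in the same format as above.
    for name, param in module.named_parameters():
        param.requires_grad = False
        print(f"=> set param {prefix}{name} {param.shape} requires grad to False.")

# Usage sketch: freeze only the vision tower of a combined vision-language model.
# vit = model.external_feature_model.vit   # attribute path mirroring the parameter names in the log
# freeze_module(vit, prefix="external_feature_model.vit.")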
model GPTVLModel( (external_feature_model): MegatronVisionModel( (vit): InternViTModel( (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14)) (position_embeddings): Embedding(1025, 1024) (decoder): TransformerBlock( (layers): ModuleList( (0-23): 24 x InternViTTransformerLayer( (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) (self_attention): SelfAttention( (core_attention): DotProductAttention( (scale_mask_softmax): FusedScaleMaskSoftmax() (attention_dropout): Dropout(p=0.0, inplace=False) ) (linear_proj): RowParallelLinear() (linear_qkv): ColumnParallelLinear() ) (self_attn_bda): IdentityFuncOp() (pre_cross_attn_layernorm): IdentityOp() (cross_attention): IdentityOp() (cross_attn_bda): IdentityFuncOp() (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) (mlp): MLP( (linear_fc1): ColumnParallelLinear() (linear_fc2): RowParallelLinear() ) (mlp_bda): IdentityFuncOp() ) ) ) ) (vision_projection): MultimodalProjector( (encoder): MLP( (linear_fc1): ColumnParallelLinear() (linear_fc2): RowParallelLinear() ) ) (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True) ) (embedding): LanguageModelEmbedding( (word_embeddings): VocabParallelEmbedding() (embedding_dropout): Dropout(p=0.0, inplace=False) ) (rotary_pos_emb): RotaryEmbedding() (decoder): TransformerBlock( (layers): ModuleList( (0-47): 48 x TransformerLayer( (input_layernorm): RMSNorm() (self_attention): SelfAttention( (core_attention): DotProductAttention( (scale_mask_softmax): FusedScaleMaskSoftmax() (attention_dropout): Dropout(p=0.0, inplace=False) ) (linear_proj): RowParallelLinear() (linear_qkv): ColumnParallelLinear() (q_layernorm): IdentityOp() (k_layernorm): IdentityOp() ) (pre_cross_attn_layernorm): IdentityOp() (cross_attention): IdentityOp() (cross_attn_bda): IdentityFuncOp() (pre_mlp_layernorm): RMSNorm() (mlp): MLP( (linear_fc1): ColumnParallelLinear() (linear_fc2): RowParallelLinear() ) ) ) (final_layernorm): RMSNorm() ) (output_layer): ColumnParallelLinear() )
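The module printout above shows the overall layout: a 24-layer InternViT encoder, a LayerNorm over 4096-dim merged vision features, an MLP vision projector, and a 48-layer Qwen2.5-style decoder with RMSNorm and rotary position embeddings. The sketch below is a plain-PyTorch schematic of that composition only, not the Megatron/ModelLink implementation; the LLM hidden size, the projector activation, and the forward-pass details are assumptions.

import torch
from torch import nn

class VisionLanguageModelSketch(nn.Module):
    # Schematic of the printed GPTVLModel layout: ViT encoder -> pre-projection
    # LayerNorm -> MLP projector -> causal decoder. Only the 1024/4096 widths are
    # visible in the printout; other dimensions are illustrative.
    def __init__(self, vit: nn.Module, llm: nn.Module,
                 merged_vision_dim: int = 4096, llm_hidden: int = 5120):
        super().__init__()
        self.vit = vit                                      # InternViT-style encoder, hidden size 1024
        self.pre_proj_layernorm = nn.LayerNorm(merged_vision_dim)
        self.vision_projection = nn.Sequential(             # stands in for MultimodalProjector's MLP
            nn.Linear(merged_vision_dim, llm_hidden),
            nn.GELU(),
            nn.Linear(llm_hidden, llm_hidden),
        )
        self.llm = llm                                      # Qwen2.5-style 48-layer decoder

    def forward(self, images: torch.Tensor, text_embeds: torch.Tensor) -> torch.Tensor:
        vision_feats = self.vit(images)                     # (B, N, merged_vision_dim) after patch merging
        vision_tokens = self.vision_projection(self.pre_proj_layernorm(vision_feats))
        return self.llm(torch.cat([vision_tokens, text_embeds], dim=1))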
vision_model_freeze => set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. => set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. => set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
model GPTVLModel(
  (external_feature_model): MegatronVisionModel(
    (vit): InternViTModel(
      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
      (position_embeddings): Embedding(1025, 1024)
      (decoder): TransformerBlock(
        (layers): ModuleList(
          (0-23): 24 x InternViTTransformerLayer(
            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (self_attention): SelfAttention(
              (core_attention): DotProductAttention(
                (scale_mask_softmax): FusedScaleMaskSoftmax()
                (attention_dropout): Dropout(p=0.0, inplace=False)
              )
              (linear_proj): RowParallelLinear()
              (linear_qkv): ColumnParallelLinear()
            )
            (self_attn_bda): IdentityFuncOp()
            (pre_cross_attn_layernorm): IdentityOp()
            (cross_attention): IdentityOp()
            (cross_attn_bda): IdentityFuncOp()
            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (mlp): MLP(
              (linear_fc1): ColumnParallelLinear()
              (linear_fc2): RowParallelLinear()
            )
            (mlp_bda): IdentityFuncOp()
          )
        )
      )
    )
    (vision_projection): MultimodalProjector(
      (encoder): MLP(
        (linear_fc1): ColumnParallelLinear()
        (linear_fc2): RowParallelLinear()
      )
    )
    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
  )
  (embedding): LanguageModelEmbedding(
    (word_embeddings): VocabParallelEmbedding()
    (embedding_dropout): Dropout(p=0.0, inplace=False)
  )
  (rotary_pos_emb): RotaryEmbedding()
  (decoder): TransformerBlock(
    (layers): ModuleList(
      (0-47): 48 x TransformerLayer(
        (input_layernorm): RMSNorm()
        (self_attention): SelfAttention(
          (core_attention): DotProductAttention(
            (scale_mask_softmax): FusedScaleMaskSoftmax()
            (attention_dropout): Dropout(p=0.0, inplace=False)
          )
          (linear_proj): RowParallelLinear()
          (linear_qkv): ColumnParallelLinear()
          (q_layernorm): IdentityOp()
          (k_layernorm): IdentityOp()
        )
        (pre_cross_attn_layernorm): IdentityOp()
        (cross_attention): IdentityOp()
        (cross_attn_bda): IdentityFuncOp()
        (pre_mlp_layernorm): RMSNorm()
        (mlp): MLP(
          (linear_fc1): ColumnParallelLinear()
          (linear_fc2): RowParallelLinear()
        )
      )
    )
    (final_layernorm): RMSNorm()
  )
  (output_layer): ColumnParallelLinear()
)
=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
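Every "=> set param ... requires grad to False." record in this log names a parameter under external_feature_model.vit, which is consistent with a freeze pass over the vision tower before training starts. The sketch below shows what such a pass could look like; the function name, the prefix argument, and the traversal are illustrative assumptions, not the repository's actual implementation, though the print format mirrors the records above.

import torch

def freeze_vision_tower(model: torch.nn.Module, prefix: str = "external_feature_model.vit") -> None:
    # Walk every parameter whose name sits under the vision tower, turn off its gradient,
    # and emit the same "=> set param ..." record seen throughout this log.
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")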
=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
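The per-rank shard shapes in these records line up with the 8-way tensor parallelism encoded in the launch script name (tp8) and the ViT's 1024-dim hidden size: ColumnParallelLinear splits its output dimension across ranks, RowParallelLinear splits its input dimension. A quick arithmetic check follows; the 4x MLP expansion is inferred from the fc1/fc2 shards rather than stated anywhere in the log.

hidden = 1024          # ViT hidden size, visible in the LayerNorm((1024,)) modules above
tp = 8                 # tensor-parallel size, from "tp8" in the launch script name
ffn = 4 * hidden       # assumed 4x MLP expansion, consistent with the shards below

# ColumnParallelLinear shards the output (row) dimension; RowParallelLinear shards the input (column) dimension.
assert (3 * hidden // tp, hidden) == (384, 1024)   # linear_qkv.weight per rank
assert (hidden, hidden // tp) == (1024, 128)       # linear_proj.weight per rank
assert (ffn // tp, hidden) == (512, 1024)          # mlp.linear_fc1.weight per rank
assert (hidden, ffn // tp) == (1024, 512)          # mlp.linear_fc2.weight per rank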
=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
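A minimal sketch of the parameter-freezing step that would produce the messages above, assuming it is simply a loop over named_parameters() matching the vision-tower prefix; the helper name freeze_vision_params and its arguments are illustrative assumptions, not the actual ModelLink/Cognitron-VL code.

import torch

def freeze_vision_params(model: torch.nn.Module, prefix: str = "external_feature_model.vit.") -> None:
    # Illustrative helper (assumption): freeze every parameter of the vision tower
    # and log it in the same format as the training log above.
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")

# Hypothetical usage: freeze_vision_params(gpt_vl_model) would emit one line per
# ViT parameter (class_token, conv1.weight, decoder.layers.0.ls1, ...).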
model GPTVLModel(
  (external_feature_model): MegatronVisionModel(
    (vit): InternViTModel(
      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
      (position_embeddings): Embedding(1025, 1024)
      (decoder): TransformerBlock(
        (layers): ModuleList(
          (0-23): 24 x InternViTTransformerLayer(
            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (self_attention): SelfAttention(
              (core_attention): DotProductAttention(
                (scale_mask_softmax): FusedScaleMaskSoftmax()
                (attention_dropout): Dropout(p=0.0, inplace=False)
              )
              (linear_proj): RowParallelLinear()
              (linear_qkv): ColumnParallelLinear()
            )
            (self_attn_bda): IdentityFuncOp()
            (pre_cross_attn_layernorm): IdentityOp()
            (cross_attention): IdentityOp()
            (cross_attn_bda): IdentityFuncOp()
            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (mlp): MLP(
              (linear_fc1): ColumnParallelLinear()
              (linear_fc2): RowParallelLinear()
            )
            (mlp_bda): IdentityFuncOp()
          )
        )
      )
    )
    (vision_projection): MultimodalProjector(
      (encoder): MLP(
        (linear_fc1): ColumnParallelLinear()
        (linear_fc2): RowParallelLinear()
      )
    )
    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
  )
  (embedding): LanguageModelEmbedding(
    (word_embeddings): VocabParallelEmbedding()
    (embedding_dropout): Dropout(p=0.0, inplace=False)
  )
  (rotary_pos_emb): RotaryEmbedding()
  (decoder): TransformerBlock(
    (layers): ModuleList(
      (0-47): 48 x TransformerLayer(
        (input_layernorm): RMSNorm()
        (self_attention): SelfAttention(
          (core_attention): DotProductAttention(
            (scale_mask_softmax): FusedScaleMaskSoftmax()
            (attention_dropout): Dropout(p=0.0, inplace=False)
          )
          (linear_proj): RowParallelLinear()
          (linear_qkv): ColumnParallelLinear()
          (q_layernorm): IdentityOp()
          (k_layernorm): IdentityOp()
        )
        (pre_cross_attn_layernorm): IdentityOp()
        (cross_attention): IdentityOp()
        (cross_attn_bda): IdentityFuncOp()
        (pre_mlp_layernorm): RMSNorm()
        (mlp): MLP(
          (linear_fc1): ColumnParallelLinear()
          (linear_fc2): RowParallelLinear()
        )
      )
    )
    (final_layernorm): RMSNorm()
  )
  (output_layer): ColumnParallelLinear()
)
vision_model_freeze
=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
   ... (ls2, input_layernorm.weight/bias, self_attention.linear_proj.weight/bias and self_attention.linear_qkv.weight/bias of layer 0 frozen likewise)
=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. => set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. => set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
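The freeze messages above follow a standard PyTorch pattern: walk the vision tower's named_parameters(), switch requires_grad off, and log each change. A minimal sketch under that assumption (the function name, the prefix argument, and the usage line are illustrative, not taken from the training code):

```python
from torch import nn

def freeze_submodule(model: nn.Module, prefix: str) -> None:
    """Freeze every parameter whose name starts with `prefix`, logging each change
    in the same style as the messages above."""
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")

# Hypothetical usage: freeze the whole ViT tower of a vision-language model.
# freeze_submodule(model, "external_feature_model.vit.")
```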
model GPTVLModel(
  (external_feature_model): MegatronVisionModel(
    (vit): InternViTModel(
      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
      (position_embeddings): Embedding(1025, 1024)
      (decoder): TransformerBlock(
        (layers): ModuleList(
          (0-23): 24 x InternViTTransformerLayer(
            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (self_attention): SelfAttention(
              (core_attention): DotProductAttention(
                (scale_mask_softmax): FusedScaleMaskSoftmax()
                (attention_dropout): Dropout(p=0.0, inplace=False)
              )
              (linear_proj): RowParallelLinear()
              (linear_qkv): ColumnParallelLinear()
            )
            (self_attn_bda): IdentityFuncOp()
            (pre_cross_attn_layernorm): IdentityOp()
            (cross_attention): IdentityOp()
            (cross_attn_bda): IdentityFuncOp()
            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
            (mlp): MLP(
              (linear_fc1): ColumnParallelLinear()
              (linear_fc2): RowParallelLinear()
            )
            (mlp_bda): IdentityFuncOp()
          )
        )
      )
    )
    (vision_projection): MultimodalProjector(
      (encoder): MLP(
        (linear_fc1): ColumnParallelLinear()
        (linear_fc2): RowParallelLinear()
      )
    )
    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
  )
  (embedding): LanguageModelEmbedding(
    (word_embeddings): VocabParallelEmbedding()
    (embedding_dropout): Dropout(p=0.0, inplace=False)
  )
  (rotary_pos_emb): RotaryEmbedding()
  (decoder): TransformerBlock(
    (layers): ModuleList(
      (0-47): 48 x TransformerLayer(
        (input_layernorm): RMSNorm()
        (self_attention): SelfAttention(
          (core_attention): DotProductAttention(
            (scale_mask_softmax): FusedScaleMaskSoftmax()
            (attention_dropout): Dropout(p=0.0, inplace=False)
          )
          (linear_proj): RowParallelLinear()
          (linear_qkv): ColumnParallelLinear()
          (q_layernorm): IdentityOp()
          (k_layernorm): IdentityOp()
        )
        (pre_cross_attn_layernorm): IdentityOp()
        (cross_attention): IdentityOp()
        (cross_attn_bda): IdentityFuncOp()
        (pre_mlp_layernorm): RMSNorm()
        (mlp): MLP(
          (linear_fc1): ColumnParallelLinear()
          (linear_fc2): RowParallelLinear()
        )
      )
    )
    (final_layernorm): RMSNorm()
  )
  (output_layer): ColumnParallelLinear()
)
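The module tree above shows the overall composition: a frozen InternViT image encoder, a pre-projection LayerNorm over 4096-wide features (which suggests neighbouring patch features are merged before projection), a two-layer MLP projector, and the 48-layer Qwen2.5-style language decoder. A rough, framework-agnostic sketch of that data flow follows; class names, the GELU activation, and the default dimensions are illustrative assumptions, and the real model uses Megatron parallel layers rather than plain nn.Linear:

```python
import torch
from torch import nn

class VisionLanguageSketch(nn.Module):
    """Minimal stand-in for the GPTVLModel layout printed above (illustrative only)."""

    def __init__(self, vit: nn.Module, llm: nn.Module,
                 merged_vit_dim: int = 4096, llm_dim: int = 5120):
        super().__init__()
        self.vit = vit                              # InternViT-style encoder, kept frozen
        self.pre_proj_layernorm = nn.LayerNorm(merged_vit_dim)
        self.vision_projection = nn.Sequential(     # two-layer MLP projector
            nn.Linear(merged_vit_dim, llm_dim),
            nn.GELU(),
            nn.Linear(llm_dim, llm_dim),
        )
        self.llm = llm                              # Qwen2.5-style decoder stack

    def forward(self, images: torch.Tensor, text_embeds: torch.Tensor) -> torch.Tensor:
        # For simplicity, assume the encoder already returns merged
        # (batch, num_tokens, merged_vit_dim) patch features.
        vision_feats = self.vit(images)
        vision_tokens = self.vision_projection(self.pre_proj_layernorm(vision_feats))
        # Vision tokens are spliced into the text embedding sequence ahead of the decoder.
        return self.llm(torch.cat([vision_tokens, text_embeds], dim=1))
```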
_get_param_groups then assigns each trainable parameter a parameter-group key of the form (0.0 or 1.0, 1.0, False, False); every rank emits the same per-parameter listing, so the raw log repeats and interleaves several identical copies of it. The unique entries in this excerpt are:
_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
and, for each language-model layer N (layers 0 through 27 appear within this excerpt), the same seven entries:
_get_param_groups name module.module.decoder.layers.N.input_layernorm.weight key (0.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.N.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.N.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, 
False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups 
name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups 
name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) 
_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, 
False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) 
_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, 
_get_param_groups (prints interleaved across parallel ranks; consolidated here by parameter pattern and key):
  key (1.0, 1.0, False, False):
    module.module.embedding.word_embeddings.weight
    module.module.decoder.layers.<N>.self_attention.linear_qkv.weight
    module.module.decoder.layers.<N>.self_attention.linear_proj.weight
    module.module.decoder.layers.<N>.mlp.linear_fc1.weight
    module.module.decoder.layers.<N>.mlp.linear_fc2.weight
    module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight
    module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight
  key (0.0, 1.0, False, False):
    module.module.decoder.layers.<N>.input_layernorm.weight
    module.module.decoder.layers.<N>.pre_mlp_layernorm.weight
    module.module.decoder.layers.<N>.self_attention.linear_qkv.bias
    module.module.external_feature_model.pre_proj_layernorm.weight
    module.module.external_feature_model.pre_proj_layernorm.bias
  (<N> = decoder layer indices 0-46 appearing in this portion of the log)
module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight 
key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, 
False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups 
name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) 
_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name 
_get_param_groups: per-parameter group keys. The raw output is interleaved across ranks and repeats each entry several times with inconsistent spacing; deduplicated summary of the entries appearing in this log segment, key format (x, y, False, False):

  key (1.0, 1.0, False, False):
    module.module.embedding.word_embeddings.weight
    module.module.decoder.layers.{0..47}.self_attention.linear_qkv.weight
    module.module.decoder.layers.{0..47}.self_attention.linear_proj.weight
    module.module.decoder.layers.{0..47}.mlp.linear_fc1.weight
    module.module.decoder.layers.{0..47}.mlp.linear_fc2.weight
    module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight
    module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight
    module.module.output_layer.weight

  key (0.0, 1.0, False, False):
    module.module.decoder.layers.{0..47}.input_layernorm.weight
    module.module.decoder.layers.{0..47}.self_attention.linear_qkv.bias
    module.module.decoder.layers.{0..47}.pre_mlp_layernorm.weight
    module.module.decoder.final_layernorm.weight
    module.module.external_feature_model.pre_proj_layernorm.weight
    module.module.external_feature_model.pre_proj_layernorm.bias
module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 
1.0, False, False) _get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name 
_get_param_groups logged one "name <parameter> key (wd_mult, lr_mult, scale_lr, is_expert)" entry per trainable parameter (entries are interleaved and repeated in the raw log because several ranks write to the same file). The key assignments reduce to two groups:

  key (1.0, 1.0, False, False)  -  weight-decayed weights:
    module.module.decoder.layers.{0..47}.self_attention.linear_qkv.weight
    module.module.decoder.layers.{0..47}.self_attention.linear_proj.weight
    module.module.decoder.layers.{0..47}.mlp.linear_fc1.weight
    module.module.decoder.layers.{0..47}.mlp.linear_fc2.weight
    module.module.embedding.word_embeddings.weight
    module.module.output_layer.weight
    module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight
    module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight

  key (0.0, 1.0, False, False)  -  no weight decay (biases and layernorms):
    module.module.decoder.layers.{0..47}.input_layernorm.weight
    module.module.decoder.layers.{0..47}.pre_mlp_layernorm.weight
    module.module.decoder.layers.{0..47}.self_attention.linear_qkv.bias
    module.module.decoder.final_layernorm.weight
    module.module.external_feature_model.pre_proj_layernorm.weight
    module.module.external_feature_model.pre_proj_layernorm.bias
name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name 
_get_param_groups: per-rank debug output, interleaved across ranks and heavily duplicated, listing each trainable parameter of the language model together with its param-group key (presumably (wd_mult, lr_mult, ...)). Deduplicated, the assignments reported in this part of the log are:

  key (1.0, 1.0, False, False):
    module.module.decoder.layers.{7..47}.self_attention.linear_qkv.weight
    module.module.decoder.layers.{7..47}.self_attention.linear_proj.weight
    module.module.decoder.layers.{7..47}.mlp.linear_fc1.weight
    module.module.decoder.layers.{7..47}.mlp.linear_fc2.weight
    module.module.output_layer.weight

  key (0.0, 1.0, False, False):
    module.module.decoder.layers.{7..47}.input_layernorm.weight
    module.module.decoder.layers.{7..47}.pre_mlp_layernorm.weight
    module.module.decoder.layers.{7..47}.self_attention.linear_qkv.bias
    module.module.decoder.final_layernorm.weight

That is, the 2-D weight matrices share one key (first multiplier 1.0), while all biases and layernorm weights share the other (first multiplier 0.0); the second multiplier is 1.0 for every parameter listed here.
module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) 
_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups 
name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name 
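The `_get_param_groups name <param> key (...)` records above show how the trainer buckets every parameter into optimizer groups before building the optimizer. The first element of the key is evidently a weight-decay multiplier (0.0 for layernorm weights and biases, 1.0 for all other weights), the second a learning-rate multiplier, and the two trailing booleans appear to be additional grouping flags (both False for every parameter in this run). The sketch below is a minimal, illustrative approximation of that bucketing, not the actual ModelLink/Megatron-LM `_get_param_groups` code; the function name, learning rate, and weight decay values are placeholders.

```python
# Minimal sketch (illustrative, not the ModelLink/Megatron source): bucket
# parameters by (wd_mult, lr_mult) the way the log records above suggest,
# i.e. wd_mult = 0.0 for biases and 1-D norm weights, 1.0 for other weights.
import torch
from torch import nn


def get_param_groups(model: nn.Module, lr: float = 1e-5, weight_decay: float = 0.1):
    buckets = {}  # (wd_mult, lr_mult) -> list of params
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # Common heuristic: no weight decay for biases and 1-D tensors
        # (LayerNorm/RMSNorm weights), matching the 0.0 entries in the log.
        no_wd = name.endswith(".bias") or param.ndim == 1
        key = (0.0 if no_wd else 1.0, 1.0)  # (wd_mult, lr_mult)
        buckets.setdefault(key, []).append(param)
        print(f"_get_param_groups name {name} key {key}")
    return [
        {"params": params, "weight_decay": weight_decay * wd_mult, "lr": lr * lr_mult}
        for (wd_mult, lr_mult), params in buckets.items()
    ]


if __name__ == "__main__":
    # Tiny stand-in model; the real run groups the Qwen2.5 decoder parameters.
    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    optimizer = torch.optim.AdamW(get_param_groups(model), lr=1e-5)
```

Excluding norm weights and biases from weight decay is standard practice for transformer fine-tuning, and it is consistent with every (0.0, 1.0, False, False) entry in the listing, which continues below for the remaining vision-projection, embedding, and decoder-layer parameters.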
module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name 
module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) 
_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) 
_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, 
False) _get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, 
False, False) _get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False) _get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key 
(0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, 
False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) _get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) _get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
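The (wd_mult, lr_mult, ...) keys logged above follow the usual Megatron-style weight-decay rule: biases and layernorm weights get a 0.0 weight-decay multiplier, every other weight gets 1.0, and the learning-rate multiplier stays at 1.0. Below is a minimal sketch of that grouping, written to mimic the lines printed here; the real _get_param_groups lives in Megatron-LM/ModelLink, the function name wrapper and the meaning of the last two boolean flags are assumptions, not taken from this log.

from collections import defaultdict

def sketch_get_param_groups(model):
    # Group trainable parameters by a (wd_mult, lr_mult, flag1, flag2) key,
    # mirroring the "_get_param_groups name ... key (...)" lines above.
    groups = defaultdict(list)
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # biases and norm weights are excluded from weight decay
        no_weight_decay = name.endswith(".bias") or "layernorm" in name
        key = (0.0 if no_weight_decay else 1.0, 1.0, False, False)
        print("_get_param_groups name", name, "key", key)
        groups[key].append(param)
    return [
        {"params": params, "wd_mult": key[0], "lr_mult": key[1]}
        for key, params in groups.items()
    ]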
_load_base_checkpoint iteration 1000
_load_base_checkpoint release False
(each of the 16 ranks on this node prints the two lines above)
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_00/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_01/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_02/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_03/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_04/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_05/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_06/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_07/model_optim_rng.pt
(each of the eight mp_rank paths is printed twice in the raw output)
load_checkpoint iteration 0
load_checkpoint release False strict True
(again printed once per rank, 16 times in total)
> rank 428 does not create GPT datasets ...
> rank 423 does not create GPT datasets ...
> rank 425 does not create GPT datasets ...
> rank 426 does not create GPT datasets ...
> rank 419 does not create GPT datasets ...
> rank 430 does not create GPT datasets ...
> rank 424 is creating GPT datasets ...
> rank 416 is creating GPT datasets ...
> rank 417 does not create GPT datasets ...
> rank 421 does not create GPT datasets ...
> rank 427 does not create GPT datasets ...
> rank 420 does not create GPT datasets ...
> rank 422 does not create GPT datasets ...
> rank 429 does not create GPT datasets ...
> rank 431 does not create GPT datasets ...
> rank 418 does not create GPT datasets ...
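The checkpoint files above are the per-tensor-parallel shards of the stage-3 run, laid out as <load_dir>/iter_0001000/mp_rank_0X/model_optim_rng.pt. A rough sketch of how such a path can be built and read back follows; the helper name is made up for illustration, and the real loading is done by Megatron/ModelLink's checkpointing code rather than a bare torch.load.

import os
import torch

def sketch_checkpoint_path(load_dir, iteration, tp_rank, release=False):
    # e.g. .../20241127_204213/iter_0001000/mp_rank_04/model_optim_rng.pt
    subdir = "release" if release else "iter_{:07d}".format(iteration)
    return os.path.join(load_dir, subdir,
                        "mp_rank_{:02d}".format(tp_rank),
                        "model_optim_rng.pt")

ckpt_path = sketch_checkpoint_path(
    "/data_2/output/LM/scripts/modellink/qwen25/"
    "finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/",
    iteration=1000, tp_rank=4)
state_dict = torch.load(ckpt_path, map_location="cpu")  # model, optimizer and RNG state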
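The target_ratios / possible_resolutions lists printed just below come from the dynamic image-tiling setup: every resolution is the tile grid (w, h) scaled by a 448-pixel tile, e.g. (3, 2) -> [1344, 896], with at most 12 tiles per image. The following small sketch regenerates the same set of values under those two assumptions; the tile size and tile cap are read off the printed numbers, not from the training code, and the print order of equal-area ratios may differ.

TILE_SIZE = 448   # inferred from the (1, 1) -> [448, 448] entry below
MAX_TILES = 12    # inferred from the largest products, e.g. (12, 1) and (2, 6)

target_ratios = sorted(
    ((w, h)
     for w in range(1, MAX_TILES + 1)
     for h in range(1, MAX_TILES + 1)
     if w * h <= MAX_TILES),
    key=lambda ratio: ratio[0] * ratio[1],
)
possible_resolutions = [[w * TILE_SIZE, h * TILE_SIZE] for w, h in target_ratios]

print("target_ratios", target_ratios)
print("possible_resolutions", possible_resolutions)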
target_ratios [(1, 1), (1, 2), (2, 1), (3, 1), (1, 3), (2, 2), (4, 1), (1, 4), (5, 1), (1, 5), (1, 6), (6, 1), (3, 2), (2, 3), (7, 1), (1, 7), (4, 2), (2, 4), (1, 8), (8, 1), (1, 9), (3, 3), (9, 1), (2, 5), (5, 2), (10, 1), (1, 10), (11, 1), (1, 11), (12, 1), (3, 4), (4, 3), (1, 12), (6, 2), (2, 6)]
possible_resolutions [[448, 448], [448, 896], [896, 448], [1344, 448], [448, 1344], [896, 896], [1792, 448], [448, 1792], [2240, 448], [448, 2240], [448, 2688], [2688, 448], [1344, 896], [896, 1344], [3136, 448], [448, 3136], [1792, 896], [896, 1792], [448, 3584], [3584, 448], [448, 4032], [1344, 1344], [4032, 448], [896, 2240], [2240, 896], [4480, 448], [448, 4480], [4928, 448], [448, 4928], [5376, 448], [1344, 1792], [1792, 1344], [448, 5376], [2688, 896], [896, 2688]]
(the pair of lines above appears twice in the raw output, once per dataset-building rank)
[h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 
0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x561289b960c0] mmco: unref short failure [h264 @ 0x561289b960c0] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128a054d40] [h264 @ 0x55e22a926d00] mmco: unref short failure mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128cc29980] mmco: unref short failure [h264 @ 0x55e22a924fc0] mmco: unref short failure [h264 @ 0x5612891da640] mmco: unref short failure [h264 @ 0x5612891da640] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 
0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure processed_samples 100 unjoint_samples 100 joint_samples 0 [186022, 193775] processed_samples 100 unjoint_samples 100 joint_samples 0 [186022, 193775] processed_samples 100 unjoint_samples 100 joint_samples 0 [142946, 145668] processed_samples 100 unjoint_samples 100 joint_samples 0 [142946, 145668] processed_samples 100 unjoint_samples 100 joint_samples 0 [120768, 118820] processed_samples 100 unjoint_samples 100 joint_samples 0 [120768, 118820] processed_samples 100 unjoint_samples 100 joint_samples 0 [134093, 132797] processed_samples 100 unjoint_samples 100 joint_samples 0 [134093, 132797] processed_samples 100 unjoint_samples 100 joint_samples 0 [153885, 155375] processed_samples 100 unjoint_samples 100 joint_samples 0 [153885, 155375] processed_samples 100 unjoint_samples 100 joint_samples 0 [195666, 194762] processed_samples 100 unjoint_samples 100 joint_samples 0 [195666, 194762] processed_samples 100 unjoint_samples 100 joint_samples 0 [183916, 182351] processed_samples 100 unjoint_samples 100 joint_samples 0 [183916, 182351] processed_samples 100 unjoint_samples 100 joint_samples 0 [221487, 239451] processed_samples 100 unjoint_samples 100 joint_samples 0 [221487, 239451] [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 
0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22bd0c140] mmco: unref short failure [h264 @ 0x55e22bd0c140] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22a90c780] mmco: unref short failure [h264 @ 0x55e22a90c780] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure processed_samples 200 unjoint_samples 200 joint_samples 0 [368675, 369232] processed_samples 200 unjoint_samples 200 joint_samples 0 [368675, 369232] [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure processed_samples 200 unjoint_samples 200 joint_samples 0 [285334, 262119] processed_samples 200 unjoint_samples 200 joint_samples 0 [285334, 262119] 
processed_samples 200 unjoint_samples 200 joint_samples 0 [404083, 461014] processed_samples 200 unjoint_samples 200 joint_samples 0 [358506, 326145] processed_samples 200 unjoint_samples 200 joint_samples 0 [404083, 461014] processed_samples 200 unjoint_samples 200 joint_samples 0 [358506, 326145] processed_samples 200 unjoint_samples 200 joint_samples 0 [405238, 408280] processed_samples 200 unjoint_samples 200 joint_samples 0 [345019, 450765] processed_samples 200 unjoint_samples 200 joint_samples 0 [345019, 450765] processed_samples 200 unjoint_samples 200 joint_samples 0 [344398, 329367] processed_samples 200 unjoint_samples 200 joint_samples 0 [344398, 329367] processed_samples 200 unjoint_samples 200 joint_samples 0 [405238, 408280] processed_samples 200 unjoint_samples 200 joint_samples 0 [397728, 418110] processed_samples 200 unjoint_samples 200 joint_samples 0 [397728, 418110] [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure 
[h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure processed_samples 300 unjoint_samples 300 joint_samples 0 [547782, 548394] processed_samples 300 unjoint_samples 300 joint_samples 0 [547782, 548394] processed_samples 300 unjoint_samples 300 joint_samples 0 [462996, 469567] processed_samples 300 unjoint_samples 300 joint_samples 0 [462996, 469567] processed_samples 300 unjoint_samples 300 joint_samples 0 [498351, 574842] processed_samples 300 unjoint_samples 300 joint_samples 0 [498351, 574842] processed_samples 300 unjoint_samples 300 joint_samples 0 [494520, 480139] processed_samples 300 unjoint_samples 300 joint_samples 0 [494520, 480139] processed_samples 300 unjoint_samples 300 joint_samples 0 [522257, 523079] processed_samples 300 unjoint_samples 300 joint_samples 0 [522257, 523079] processed_samples 300 unjoint_samples 300 joint_samples 0 [582775, 582931] processed_samples 300 unjoint_samples 300 joint_samples 0 [582775, 582931] processed_samples 300 unjoint_samples 300 joint_samples 0 [601410, 598427] processed_samples 300 unjoint_samples 300 joint_samples 0 [601410, 598427] processed_samples 300 unjoint_samples 300 joint_samples 0 [566295, 566169] processed_samples 300 unjoint_samples 300 joint_samples 0 [566295, 566169] [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x561288a14b40] mmco: unref short failure [h264 @ 0x561288a14b40] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure 
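The processed_samples / unjoint_samples / joint_samples lines above and below advance in steps of 100, and the bracketed pair climbs toward roughly 1,048,576 before joint_samples ticks from 0 to 1 and one of the counters restarts. That pattern is consistent with packing raw samples into fixed-size token buffers; both that interpretation and the 2**20 cap are inferences from the printed numbers only, so treat the sketch below purely as a toy illustration of such a rolling counter, not as the data loader's actual code.

BUFFER_CAPACITY = 2 ** 20  # assumed; the logged values top out a little above 1.04e6

class PackingProgress:
    def __init__(self, num_buffers=2):
        self.processed_samples = 0
        self.joint_samples = 0
        self.fill = [0] * num_buffers  # tokens accumulated in each packing buffer

    def add(self, buffer_idx, num_tokens):
        self.processed_samples += 1
        self.fill[buffer_idx] += num_tokens
        if self.fill[buffer_idx] >= BUFFER_CAPACITY:
            # buffer is full: count one packed ("joint") sample and start refilling
            self.joint_samples += 1
            self.fill[buffer_idx] -= BUFFER_CAPACITY

    def report(self):
        print("processed_samples", self.processed_samples,
              "unjoint_samples", self.processed_samples,
              "joint_samples", self.joint_samples, self.fill)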
[h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x56128979a440] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure 
[h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure processed_samples 400 unjoint_samples 400 joint_samples 0 [836901, 719403] processed_samples 400 unjoint_samples 400 joint_samples 0 [836901, 719403] processed_samples 400 unjoint_samples 400 joint_samples 0 [667466, 667618] processed_samples 400 unjoint_samples 400 joint_samples 0 [667466, 667618] processed_samples 400 unjoint_samples 400 joint_samples 0 [660100, 660565] processed_samples 400 unjoint_samples 400 joint_samples 0 [631431, 630399] processed_samples 400 unjoint_samples 400 joint_samples 0 [720203, 721553] processed_samples 400 unjoint_samples 400 joint_samples 0 [720203, 721553] processed_samples 400 unjoint_samples 400 joint_samples 0 [795830, 794904] processed_samples 400 unjoint_samples 400 joint_samples 0 [631431, 630399] processed_samples 400 unjoint_samples 400 joint_samples 0 [660100, 660565] processed_samples 400 unjoint_samples 400 joint_samples 0 [795830, 794904] processed_samples 400 unjoint_samples 400 joint_samples 0 [647763, 646566] processed_samples 400 unjoint_samples 400 joint_samples 0 [647763, 646566] processed_samples 400 unjoint_samples 400 joint_samples 0 [646835, 647985] processed_samples 400 unjoint_samples 400 joint_samples 0 [646835, 647985] [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure 
[h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure processed_samples 500 unjoint_samples 500 joint_samples 0 [911947, 913588] processed_samples 500 unjoint_samples 500 joint_samples 0 [911947, 913588] processed_samples 500 unjoint_samples 500 joint_samples 0 [833413, 833755] processed_samples 500 unjoint_samples 500 joint_samples 0 [833413, 833755] processed_samples 500 unjoint_samples 500 joint_samples 0 [837629, 827409] processed_samples 500 unjoint_samples 500 joint_samples 0 [837629, 827409] processed_samples 500 unjoint_samples 500 joint_samples 0 [812938, 814161] processed_samples 500 unjoint_samples 500 joint_samples 0 [812938, 814161] processed_samples 500 unjoint_samples 500 joint_samples 0 [801158, 802599] processed_samples 500 unjoint_samples 500 joint_samples 0 [860184, 858214] processed_samples 500 unjoint_samples 500 joint_samples 0 [860184, 858214] processed_samples 500 unjoint_samples 500 joint_samples 0 [801158, 802599] processed_samples 500 unjoint_samples 500 joint_samples 0 [1004051, 969682] processed_samples 500 unjoint_samples 500 joint_samples 0 [1004051, 969682] processed_samples 500 unjoint_samples 500 joint_samples 0 [875155, 877039] processed_samples 500 unjoint_samples 500 joint_samples 0 [875155, 877039] [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e228a2a840] stream 1, offset 0x14007e3: partial file [mov,mp4,m4a,3gp,3g2,mj2 @ 0x56128b6ede00] stream 1, offset 0x14007e3: partial file [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure ................................................................................................[h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref 
short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure processed_samples 600 unjoint_samples 600 joint_samples 0 [1044187, 1044172] processed_samples 600 unjoint_samples 600 joint_samples 0 
[1044187, 1044172] processed_samples 601 unjoint_samples 600 joint_samples 0 [1041744, 1004673] processed_samples 601 unjoint_samples 600 joint_samples 0 [1041744, 1004673] processed_samples 600 unjoint_samples 600 joint_samples 1 [304513, 1032496] processed_samples 600 unjoint_samples 600 joint_samples 1 [304513, 1032496] processed_samples 600 unjoint_samples 600 joint_samples 0 [1010221, 1009608] processed_samples 600 unjoint_samples 600 joint_samples 0 [1010221, 1009608] processed_samples 600 unjoint_samples 600 joint_samples 0 [978356, 1011143] processed_samples 600 unjoint_samples 600 joint_samples 0 [960096, 960215] processed_samples 600 unjoint_samples 600 joint_samples 0 [978356, 1011143] processed_samples 600 unjoint_samples 600 joint_samples 0 [960096, 960215] processed_samples 600 unjoint_samples 600 joint_samples 1 [10666, 1043577] processed_samples 600 unjoint_samples 600 joint_samples 1 [10666, 1043577] processed_samples 600 unjoint_samples 600 joint_samples 0 [1026795, 1026473] processed_samples 600 unjoint_samples 600 joint_samples 0 [1026795, 1026473] [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 700 unjoint_samples 700 joint_samples 1 [226088, 1046842]
processed_samples 700 unjoint_samples 700 joint_samples 1 [428195, 1046446]
processed_samples 700 unjoint_samples 700 joint_samples 1 [162196, 1047135]
processed_samples 701 unjoint_samples 700 joint_samples 1 [308491, 1047272]
processed_samples 700 unjoint_samples 700 joint_samples 1 [393814, 1043577]
processed_samples 700 unjoint_samples 700 joint_samples 1 [1041219, 160553]
processed_samples 700 unjoint_samples 700 joint_samples 1 [647690, 1032496]
processed_samples 700 unjoint_samples 700 joint_samples 1 [1047990, 224834]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
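The "[h264 @ 0x...] mmco: unref short failure" messages filling this part of the log come from the libavcodec H.264 decoder used by the video reader; the decoder writes them directly to the C-level stderr, which is why they bypass Python logging and land in the node log. If that noise is unwanted, one way to mute it (a sketch only, not code from this repository) is to point file descriptor 2 at /dev/null around the decode call:

import os
from contextlib import contextmanager

@contextmanager
def silence_native_stderr():
    # Temporarily send fd 2 to /dev/null so that warnings printed by C
    # libraries (e.g. libavcodec's "mmco: unref short failure") disappear.
    devnull = os.open(os.devnull, os.O_WRONLY)
    saved_stderr = os.dup(2)
    try:
        os.dup2(devnull, 2)
        yield
    finally:
        os.dup2(saved_stderr, 2)   # restore the real stderr
        os.close(saved_stderr)
        os.close(devnull)

# Hypothetical usage around whatever video-decoding call the loader makes;
# decode_video() is a placeholder name, not an actual function in this code base.
# with silence_native_stderr():
#     frames = decode_video(sample_path)

The trade-off is that genuine decoder errors are hidden as well, so a wrapper like this is better applied per call than process-wide.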
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 800 unjoint_samples 800 joint_samples 1 [1047990, 557500]
processed_samples 800 unjoint_samples 800 joint_samples 1 [1041219, 394607]
processed_samples 800 unjoint_samples 800 joint_samples 1 [643585, 1043577]
processed_samples 800 unjoint_samples 800 joint_samples 1 [708885, 1046446]
processed_samples 800 unjoint_samples 800 joint_samples 1 [398612, 1047135]
processed_samples 800 unjoint_samples 800 joint_samples 1 [686423, 1046842]
processed_samples 801 unjoint_samples 800 joint_samples 1 [666478, 1047272]
processed_samples 800 unjoint_samples 800 joint_samples 1 [993812, 1032496]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 900 unjoint_samples 900 joint_samples 1 [1016361, 1046446]
processed_samples 900 unjoint_samples 900 joint_samples 2 [13992, 1046842]
processed_samples 900 unjoint_samples 900 joint_samples 2 [1040643, 355511]
processed_samples 900 unjoint_samples 900 joint_samples 1 [1041219, 670899]
processed_samples 900 unjoint_samples 900 joint_samples 2 [969831, 153779]
processed_samples 900 unjoint_samples 900 joint_samples 1 [759306, 1047135]
processed_samples 900 unjoint_samples 900 joint_samples 1 [1047990, 969366]
processed_samples 901 unjoint_samples 900 joint_samples 1 [896683, 1047272]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1040072, 267141]
processed_samples 1000 unjoint_samples 1000 joint_samples 2 [182886, 1047546]
processed_samples 1000 unjoint_samples 1000 joint_samples 2 [282074, 1046842]
processed_samples 1001 unjoint_samples 1000 joint_samples 2 [1030657, 312127]
processed_samples 1000 unjoint_samples 1000 joint_samples 2 [969831, 439330]
processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1040643, 607211]
processed_samples 1000 unjoint_samples 1000 joint_samples 1 [1041219, 1028297]
processed_samples 1000 unjoint_samples 1000 joint_samples 1 [1003739, 1047135]
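A reading of the bracketed pairs, offered only as a guess: both numbers climb toward just under 1,048,576 (2^20) and then one of them restarts near zero while "joint_samples" ticks up, which looks like samples being packed into fixed-budget buffers that are flushed when full. The toy sketch below illustrates that kind of bookkeeping; the 2^20 token budget, the two buffers, and every name in it are assumptions made for illustration, not code or configuration taken from this run.

import random
from typing import List

PACK_BUDGET = 1_048_576  # assumed per-buffer token budget (2**20), inferred from the log values

class PackingCounters:
    """Toy model of the processed/unjoint/joint counters seen in the log."""

    def __init__(self, num_buffers: int = 2):
        self.buffers: List[int] = [0] * num_buffers   # current token fill per buffer
        self.processed_samples = 0                    # raw samples consumed
        self.unjoint_samples = 0                      # samples before packing
        self.joint_samples = 0                        # packed ("joint") sequences emitted

    def add_sample(self, num_tokens: int) -> None:
        self.processed_samples += 1
        self.unjoint_samples += 1
        # Greedily drop the sample into the emptiest buffer.
        i = self.buffers.index(min(self.buffers))
        if self.buffers[i] + num_tokens > PACK_BUDGET:
            # Buffer would overflow: count one packed sequence and start over.
            self.joint_samples += 1
            self.buffers[i] = 0
        self.buffers[i] += num_tokens

    def report(self) -> str:
        return (f"processed_samples {self.processed_samples} "
                f"unjoint_samples {self.unjoint_samples} "
                f"joint_samples {self.joint_samples} {self.buffers}")

if __name__ == "__main__":
    random.seed(0)
    counters = PackingCounters()
    for step in range(1, 601):
        counters.add_sample(random.randint(500, 20_000))
        if step % 100 == 0:
            print(counters.report())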
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1040072, 626195]
processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1040643, 904670]
processed_samples 1100 unjoint_samples 1100 joint_samples 2 [450893, 1047546]
processed_samples 1100 unjoint_samples 1100 joint_samples 2 [273189, 1046987]
processed_samples 1100 unjoint_samples 1100 joint_samples 2 [210795, 1047135]
processed_samples 1100 unjoint_samples 1100 joint_samples 2 [533667, 1046842]
processed_samples 1101 unjoint_samples 1100 joint_samples 2 [1030657, 679216]
processed_samples 1100 unjoint_samples 1100 joint_samples 2 [969831, 733096]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 1200 unjoint_samples 1200 joint_samples 2 [667526, 1046987]
processed_samples 1200 unjoint_samples 1200 joint_samples 2 [936379, 1046842]
processed_samples 1201 unjoint_samples 1200 joint_samples 2 [1030657, 947100]
processed_samples 1200 unjoint_samples 1200 joint_samples 3 [242674, 1035464]
processed_samples 1200 unjoint_samples 1200 joint_samples 2 [483005, 1047135]
processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1040072, 928531]
processed_samples 1200 unjoint_samples 1200 joint_samples 2 [979282, 975805]
processed_samples 1200 unjoint_samples 1200 joint_samples 2 [816679, 1047546]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1047254, 139509]
processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1043583, 270915]
processed_samples 1300 unjoint_samples 1300 joint_samples 2 [803415, 1047135]
processed_samples 1301 unjoint_samples 1300 joint_samples 3 [174802, 1046728]
processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1047365, 50014]
processed_samples 1300 unjoint_samples 1300 joint_samples 3 [554809, 1035464]
processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1046111, 75712]
processed_samples 1300 unjoint_samples 1300 joint_samples 2 [992945, 1046987]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1046111, 652971]
processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1047365, 366106]
processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1047018, 235307]
processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1042381, 170469]
processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1043583, 524360]
processed_samples 1401 unjoint_samples 1400 joint_samples 3 [429733, 1046728]
processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1047254, 457149]
processed_samples 1400 unjoint_samples 1400 joint_samples 3 [920529, 1035464]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e228a2a840] stream 0, offset 0x90050f: partial file
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x56128a593480] stream 0, offset 0x90050f: partial file
processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1043583, 757680]
processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1042381, 482939]
processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1047018, 651929]
processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1047254, 755018]
processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1047365, 817895]
processed_samples 1500 unjoint_samples 1500 joint_samples 4 [1047386, 160348]
processed_samples 1501 unjoint_samples 1500 joint_samples 3 [727440, 1046728]
processed_samples 1501 unjoint_samples 1500 joint_samples 3 [1046111, 955947]
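The "stream 0, offset 0x90050f: partial file" lines above mean the mp4 demuxer hit the end of a truncated video before the stream was complete; the run apparently continues past it, but such files can also be screened out of the sample list ahead of time. Below is a sketch of one way to do that with a stock ffmpeg binary; the helper name and the idea of pre-filtering are assumptions for illustration, not something this pipeline is known to do.

import subprocess

def video_is_complete(path: str, timeout: int = 60) -> bool:
    # Decode the file to a null sink; ffmpeg reports problems such as
    # "partial file" on stderr and usually exits non-zero for truncated input.
    proc = subprocess.run(
        ["ffmpeg", "-v", "error", "-i", path, "-f", "null", "-"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.PIPE,
        timeout=timeout,
    )
    return proc.returncode == 0 and not proc.stderr.strip()

# Example: keep only videos that decode cleanly (candidate_paths is hypothetical).
# good = [p for p in candidate_paths if video_is_complete(p)]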
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 1600 unjoint_samples 1600 joint_samples 4 [37478, 1045232]
processed_samples 1600 unjoint_samples 1600 joint_samples 4 [217280, 1040076]
processed_samples 1600 unjoint_samples 1600 joint_samples 4 [1047386, 468651]
processed_samples 1601 unjoint_samples 1600 joint_samples 3 [979413, 1046728]
processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1042381, 752060]
processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1047018, 923612]
processed_samples 1601 unjoint_samples 1600 joint_samples 4 [326570, 1038242]
processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1047254, 1047923]
[h264 @ 0x...] mmco: unref short failure   (warning repeated many times by multiple h264 decoder instances)
processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1047254, 235121]
processed_samples 1700 unjoint_samples 1700 joint_samples 4 [480590, 1040076]
processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1047018, 139667]
processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1047386, 833354]
processed_samples 1700 unjoint_samples 1700 joint_samples 4 [306424, 1045232]
processed_samples 1701 unjoint_samples 1700 joint_samples 4 [275790, 1047677]
processed_samples 1701 unjoint_samples 1700 joint_samples 4 [710424, 1038242]
processed_samples 1700 unjoint_samples 1700 joint_samples 3 [1042381, 1017515]
failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure processed_samples 1800 unjoint_samples 1800 joint_samples 5 [100143, 1045094] processed_samples 1800 unjoint_samples 1800 joint_samples 5 [100143, 1045094] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1047018, 418390] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1047018, 418390] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1047254, 540431] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1047254, 540431] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [324915, 1041240] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [324915, 1041240] [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure processed_samples 1801 
unjoint_samples 1800 joint_samples 5 [1048044, 24994] processed_samples 1801 unjoint_samples 1800 joint_samples 5 [1048044, 24994] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [842119, 1040076] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [842119, 1040076] [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x561287d24780] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x561287d24780] mmco: unref short failure [h264 @ 0x561287d24780] mmco: unref short failure processed_samples 1801 unjoint_samples 1800 joint_samples 4 [663997, 1047677] processed_samples 1801 unjoint_samples 1800 joint_samples 4 [663997, 1047677] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [544594, 1045232] processed_samples 1800 unjoint_samples 1800 joint_samples 4 [544594, 1045232] [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 
@ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1047018, 742731] processed_samples 1900 unjoint_samples 1900 joint_samples 5 [55867, 1040491] processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1047018, 742731] processed_samples 1900 unjoint_samples 1900 joint_samples 5 [55867, 1040491] processed_samples 1900 unjoint_samples 1900 joint_samples 5 [372981, 1045094] processed_samples 1900 unjoint_samples 1900 joint_samples 5 [372981, 1045094] processed_samples 1900 unjoint_samples 1900 joint_samples 4 [631294, 1041240] processed_samples 1900 unjoint_samples 1900 joint_samples 4 [631294, 1041240] processed_samples 1900 unjoint_samples 1900 joint_samples 4 [975121, 1045232] processed_samples 1900 unjoint_samples 1900 joint_samples 4 [975121, 1045232] processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1047254, 893785] processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1047254, 893785] processed_samples 1901 unjoint_samples 1900 joint_samples 4 [896400, 1047677] processed_samples 1901 unjoint_samples 1900 joint_samples 4 [896400, 1047677] processed_samples 1901 unjoint_samples 1900 joint_samples 5 [1048044, 345670] processed_samples 1901 unjoint_samples 1900 joint_samples 5 [1048044, 345670] [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 
0x561288bc0640] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128c7a2700] mmco: unref short failure [h264 @ 0x56128c7a2700] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 
0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x561287d24780] mmco: unref short failure [h264 @ 0x561287d24780] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure processed_samples 2001 unjoint_samples 2000 joint_samples 5 [172544, 1047677] processed_samples 2001 unjoint_samples 2000 joint_samples 5 [172544, 1047677] [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure processed_samples 2000 unjoint_samples 2000 joint_samples 4 [935492, 1041240] [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure processed_samples 2000 unjoint_samples 2000 joint_samples 4 [935492, 1041240] processed_samples 2000 unjoint_samples 2000 joint_samples 5 [131676, 1047087] processed_samples 2000 unjoint_samples 2000 joint_samples 5 [271897, 1048208] processed_samples 2000 unjoint_samples 2000 joint_samples 5 [271897, 1048208] processed_samples 2000 unjoint_samples 2000 joint_samples 5 [131676, 1047087] processed_samples 2000 unjoint_samples 
2000 joint_samples 5 [387492, 1040491] processed_samples 2000 unjoint_samples 2000 joint_samples 5 [387492, 1040491] processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1047018, 1013559] processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1047018, 1013559] processed_samples 2000 unjoint_samples 2000 joint_samples 5 [712122, 1045094] processed_samples 2000 unjoint_samples 2000 joint_samples 5 [712122, 1045094] processed_samples 2001 unjoint_samples 2000 joint_samples 5 [1048044, 671558] processed_samples 2001 unjoint_samples 2000 joint_samples 5 [1048044, 671558] [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e228bad300] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x55e22aae1740] [h264 @ 0x561289fb1cc0] mmco: unref short failure mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 
0x55e229eee4c0] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure processed_samples 2100 unjoint_samples 2100 joint_samples 5 [137673, 1045908] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [137673, 1045908] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [309648, 1030476] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [309648, 1030476] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [513014, 1048208] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [513014, 1048208] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [494495, 1047087] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [494495, 1047087] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [713881, 1040491] processed_samples 2100 unjoint_samples 2100 joint_samples 5 [713881, 1040491] processed_samples 2100 unjoint_samples 2100 joint_samples 6 [1013241, 137199] processed_samples 2100 unjoint_samples 2100 joint_samples 6 [1013241, 137199] [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure processed_samples 2101 unjoint_samples 2100 joint_samples 5 [511377, 1047677] processed_samples 2101 unjoint_samples 2100 joint_samples 5 [511377, 1047677] [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure processed_samples 2101 unjoint_samples 2100 joint_samples 5 [1048044, 987567] processed_samples 2101 unjoint_samples 2100 joint_samples 5 [1048044, 987567] [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 
0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x561287d24780] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure processed_samples 2200 unjoint_samples 2200 joint_samples 6 [1045917, 80537] [h264 @ 0x56128884fb40] mmco: unref short failure processed_samples 2200 
unjoint_samples 2200 joint_samples 6 [1045917, 80537] processed_samples 2200 unjoint_samples 2200 joint_samples 5 [681039, 1030476] processed_samples 2200 unjoint_samples 2200 joint_samples 6 [1013241, 467123] processed_samples 2200 unjoint_samples 2200 joint_samples 5 [371447, 1045908] processed_samples 2200 unjoint_samples 2200 joint_samples 5 [681039, 1030476] processed_samples 2200 unjoint_samples 2200 joint_samples 6 [1013241, 467123] processed_samples 2200 unjoint_samples 2200 joint_samples 5 [371447, 1045908] processed_samples 2201 unjoint_samples 2200 joint_samples 5 [802590, 1047677] processed_samples 2201 unjoint_samples 2200 joint_samples 6 [1048264, 148370] processed_samples 2201 unjoint_samples 2200 joint_samples 5 [802590, 1047677] processed_samples 2201 unjoint_samples 2200 joint_samples 6 [1048264, 148370] processed_samples 2200 unjoint_samples 2200 joint_samples 5 [869315, 1048208] processed_samples 2200 unjoint_samples 2200 joint_samples 5 [701507, 1047087] processed_samples 2200 unjoint_samples 2200 joint_samples 5 [701507, 1047087] [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure processed_samples 2200 unjoint_samples 2200 joint_samples 5 [869315, 1048208] [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 
0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1047343, 177365] processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1047343, 177365] processed_samples 2301 unjoint_samples 2300 joint_samples 6 [1030088, 94750] processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1045917, 406035] processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1045917, 406035] processed_samples 2301 unjoint_samples 2300 joint_samples 6 [1030088, 94750] processed_samples 2300 unjoint_samples 2300 joint_samples 5 [749716, 1045908] processed_samples 2300 unjoint_samples 2300 joint_samples 5 [749716, 1045908] processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1026149, 1030476] processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1026149, 1030476] processed_samples 2301 unjoint_samples 2300 joint_samples 6 [1048264, 465412] [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure processed_samples 2301 unjoint_samples 2300 joint_samples 6 [1048264, 465412] processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1017876, 1047087] processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1017876, 1047087] processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1013241, 766847] processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1013241, 766847] [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 
0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 
0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1045917, 646544] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1045917, 646544] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1046243, 284424] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1046243, 284424] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1037735, 508554] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1037735, 508554] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1047343, 475294] [h264 @ 0x55e22961e8c0] mmco: unref short failure processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1047343, 475294] [h264 @ 0x5612898c5680] mmco: unref short failure processed_samples 2401 unjoint_samples 2400 joint_samples 6 [1030088, 460496] processed_samples 2401 unjoint_samples 2400 joint_samples 6 [1030088, 460496] [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1030156, 1029122] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1030156, 1029122] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1048000, 4536] processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1048000, 4536] processed_samples 2401 unjoint_samples 2400 joint_samples 6 [1048264, 715203] processed_samples 2401 unjoint_samples 2400 joint_samples 6 [1048264, 715203] [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 
0x55e229b23540] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [mov,mp4,m4a,3gp,3g2,mj2 @ 0x56128f2cbe00] stream 0, offset 0x900676: partial file [mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e22914b880] stream 0, offset 0x900676: partial file [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1048000, 362059] processed_samples 2500 unjoint_samples 2500 joint_samples 7 [1030156, 483998] processed_samples 2500 unjoint_samples 2500 joint_samples 7 [1030156, 483998] processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1048000, 362059] processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1046243, 625554] processed_samples 2501 unjoint_samples 2500 joint_samples 7 [112418, 1036695] processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1046243, 625554] 
processed_samples 2501 unjoint_samples 2500 joint_samples 7 [112418, 1036695] processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1037735, 822074] processed_samples 2501 unjoint_samples 2500 joint_samples 6 [1047343, 785574] processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1037735, 822074] processed_samples 2501 unjoint_samples 2500 joint_samples 6 [1047343, 785574] processed_samples 2501 unjoint_samples 2500 joint_samples 6 [1030088, 738139] processed_samples 2501 unjoint_samples 2500 joint_samples 6 [1030088, 738139] processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1045917, 985285] processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1045917, 985285] [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure 
[h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure processed_samples 2600 unjoint_samples 2600 joint_samples 7 [45179, 1046923] processed_samples 2600 unjoint_samples 2600 joint_samples 7 [205945, 1048216] processed_samples 2600 unjoint_samples 2600 joint_samples 7 [205945, 1048216] processed_samples 2600 unjoint_samples 2600 joint_samples 7 [45179, 1046923] processed_samples 2601 unjoint_samples 2600 joint_samples 7 [121030, 1042340] processed_samples 2601 unjoint_samples 2600 joint_samples 7 [121030, 1042340] processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1048000, 750275] processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1048000, 750275] processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1046243, 955807] processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1046243, 955807] processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1030156, 725003] processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1030156, 725003] processed_samples 2601 unjoint_samples 2600 joint_samples 7 [393147, 1036695] processed_samples 2601 unjoint_samples 2600 joint_samples 7 [393147, 1036695] processed_samples 2601 unjoint_samples 2600 joint_samples 6 [1030088, 984450] processed_samples 2601 
unjoint_samples 2600 joint_samples 6 [1030088, 984450] [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e228bad300] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure 
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 2700 unjoint_samples 2700 joint_samples 6 [1048000, 1023513]
processed_samples 2700 unjoint_samples 2700 joint_samples 7 [540752, 1048216]
processed_samples 2700 unjoint_samples 2700 joint_samples 7 [215252, 1047022]
processed_samples 2700 unjoint_samples 2700 joint_samples 8 [41203, 1033029]
processed_samples 2701 unjoint_samples 2700 joint_samples 7 [382659, 1025328]
processed_samples 2700 unjoint_samples 2700 joint_samples 7 [279311, 1046923]
processed_samples 2701 unjoint_samples 2700 joint_samples 7 [328885, 1042340]
processed_samples 2701 unjoint_samples 2700 joint_samples 7 [768752, 1036695]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 2800 unjoint_samples 2800 joint_samples 7 [405904, 1039367]
processed_samples 2800 unjoint_samples 2800 joint_samples 7 [518486, 1047022]
processed_samples 2800 unjoint_samples 2800 joint_samples 7 [835295, 1048216]
processed_samples 2800 unjoint_samples 2800 joint_samples 7 [576138, 1046923]
processed_samples 2801 unjoint_samples 2800 joint_samples 7 [690263, 1042340]
processed_samples 2801 unjoint_samples 2800 joint_samples 7 [760218, 1025328]
processed_samples 2800 unjoint_samples 2800 joint_samples 8 [296930, 1033029]
processed_samples 2801 unjoint_samples 2800 joint_samples 7 [1040788, 1041285]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 2900 unjoint_samples 2900 joint_samples 8 [1045254, 88569]
processed_samples 2900 unjoint_samples 2900 joint_samples 7 [646902, 1039367]
processed_samples 2900 unjoint_samples 2900 joint_samples 7 [841723, 1047022]
processed_samples 2900 unjoint_samples 2900 joint_samples 8 [604307, 1033029]
processed_samples 2901 unjoint_samples 2900 joint_samples 8 [1030448, 39107]
processed_samples 2901 unjoint_samples 2900 joint_samples 8 [1045383, 292093]
processed_samples 2900 unjoint_samples 2900 joint_samples 7 [884455, 1046923]
processed_samples 2901 unjoint_samples 2900 joint_samples 8 [23331, 1036021]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1046722, 17533]
processed_samples 3000 unjoint_samples 3000 joint_samples 8 [928842, 1033029]
processed_samples 3000 unjoint_samples 3000 joint_samples 8 [91754, 1046923]
processed_samples 3001 unjoint_samples 3000 joint_samples 8 [323427, 1036021]
processed_samples 3001 unjoint_samples 3000 joint_samples 8 [1030448, 343456]
processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1045254, 428614]
processed_samples 3001 unjoint_samples 3000 joint_samples 8 [1045383, 626006]
processed_samples 3000 unjoint_samples 3000 joint_samples 7 [889351, 1039367]
[h264 @ 0x...] Missing reference picture, default is 65530 (repeated)
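The "[h264 @ 0x...] mmco: unref short failure" and "Missing reference picture, default is 65530" messages in this log are non-fatal warnings from ffmpeg's libavcodec h264 decoder, emitted while the dataloader decodes video clips; only the processed_samples counters track training progress. Below is a minimal post-processing sketch, assuming the console output has been saved to a file (for example the log_node*.txt written by the launch script) and that only these two warning types need counting; the script is illustrative and not part of the training code.

#!/usr/bin/env python3
# tally_h264_warnings.py -- hedged sketch, not part of the training pipeline.
# Counts the two libavcodec h264 warning types seen in a saved log file.
import re
import sys
from collections import Counter

WARNING = re.compile(
    r"\[h264 @ 0x[0-9a-f]+\] "
    r"(mmco: unref short failure|Missing reference picture, default is \d+)"
)

def tally(path):
    counts = Counter()
    with open(path, errors="replace") as fh:
        for line in fh:
            counts.update(WARNING.findall(line))
    return counts

if __name__ == "__main__":
    for message, count in tally(sys.argv[1]).most_common():
        print(f"{count:8d}  {message}")

Example usage: python3 tally_h264_warnings.py log_node26.txt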
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1045254, 773246]
processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1021964, 161897]
processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1046722, 369048]
processed_samples 3100 unjoint_samples 3100 joint_samples 8 [404846, 1046923]
processed_samples 3100 unjoint_samples 3100 joint_samples 9 [185498, 1038599]
processed_samples 3101 unjoint_samples 3100 joint_samples 8 [607484, 1036021]
processed_samples 3101 unjoint_samples 3100 joint_samples 8 [1030448, 673765]
processed_samples 3101 unjoint_samples 3100 joint_samples 8 [1045383, 908940]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3200 unjoint_samples 3200 joint_samples 9 [58933, 1030513]
processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1021964, 456191]
processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1046722, 666423]
processed_samples 3201 unjoint_samples 3200 joint_samples 9 [246014, 1046718]
processed_samples 3200 unjoint_samples 3200 joint_samples 9 [442230, 1038599]
processed_samples 3201 unjoint_samples 3200 joint_samples 8 [1030448, 940617]
processed_samples 3200 unjoint_samples 3200 joint_samples 8 [746127, 1046923]
processed_samples 3201 unjoint_samples 3200 joint_samples 8 [904972, 1036021]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1021964, 779552]
processed_samples 3300 unjoint_samples 3300 joint_samples 9 [328940, 1030513]
processed_samples 3301 unjoint_samples 3300 joint_samples 9 [203465, 1008876]
processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1033930, 1046923]
processed_samples 3301 unjoint_samples 3300 joint_samples 9 [518812, 1046718]
processed_samples 3300 unjoint_samples 3300 joint_samples 9 [696602, 1038599]
processed_samples 3301 unjoint_samples 3300 joint_samples 9 [121910, 1046609]
processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1046722, 993764]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3400 unjoint_samples 3400 joint_samples 9 [355585, 1048033]
processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1046722, 209796]
processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1021964, 177076]
processed_samples 3400 unjoint_samples 3400 joint_samples 9 [600330, 1030513]
processed_samples 3401 unjoint_samples 3400 joint_samples 9 [452611, 1008876]
processed_samples 3401 unjoint_samples 3400 joint_samples 9 [466004, 1046609]
processed_samples 3401 unjoint_samples 3400 joint_samples 9 [796266, 1046718]
processed_samples 3400 unjoint_samples 3400 joint_samples 9 [946602, 1038599]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3500 unjoint_samples 3500 joint_samples 9 [895311, 1030513]
processed_samples 3501 unjoint_samples 3500 joint_samples 10 [9095, 1046785]
processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1046722, 521420]
processed_samples 3500 unjoint_samples 3500 joint_samples 10 [1042412, 166441]
processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1021964, 446775]
processed_samples 3500 unjoint_samples 3500 joint_samples 9 [599804, 1048033]
processed_samples 3501 unjoint_samples 3500 joint_samples 9 [855613, 1046609]
processed_samples 3501 unjoint_samples 3500 joint_samples 9 [679934, 1008876]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1047536, 177347]
processed_samples 3600 unjoint_samples 3600 joint_samples 9 [977126, 1048033]
processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1042412, 412010]
processed_samples 3601 unjoint_samples 3600 joint_samples 10 [84468, 1046609]
processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1046722, 826719]
processed_samples 3601 unjoint_samples 3600 joint_samples 10 [322566, 1046785]
processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1021964, 721974]
processed_samples 3601 unjoint_samples 3600 joint_samples 9 [961186, 1008876]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
[h264 @ 0x...] Missing reference picture, default is 65530 (repeated)
processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047536, 481973]
processed_samples 3700 unjoint_samples 3700 joint_samples 10 [100774, 1025248]
processed_samples 3701 unjoint_samples 3700 joint_samples 10 [184227, 1031670]
processed_samples 3700 unjoint_samples 3700 joint_samples 10 [180316, 1048033]
processed_samples 3700 unjoint_samples 3700 joint_samples 9 [1026642, 1024926]
processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1042412, 641067]
processed_samples 3701 unjoint_samples 3700 joint_samples 10 [430293, 1046609]
processed_samples 3701 unjoint_samples 3700 joint_samples 10 [637133, 1046785]
[h264 @ 0x...] mmco: unref short failure (repeated decoder warnings condensed)
processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047451, 306665]
processed_samples 3800 unjoint_samples 3800 joint_samples 10 [459799, 1025248]
processed_samples 3800 unjoint_samples 3800 joint_samples 10 [559840, 1048033]
processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047536, 781745]
processed_samples 3801 unjoint_samples 3800 joint_samples 10 [520437, 1031670]
processed_samples 3801 unjoint_samples 3800 joint_samples 10 [747929, 1046609]
processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1042412, 931910]
processed_samples 3801 unjoint_samples 3800 joint_samples 10 [967502, 1046785]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1047451, 584412]
processed_samples 3900 unjoint_samples 3900 joint_samples 11 [56096, 1031969]
processed_samples 3900 unjoint_samples 3900 joint_samples 10 [781059, 1025248]
processed_samples 3901 unjoint_samples 3900 joint_samples 11 [1046348, 15997]
processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1043406, 163709]
processed_samples 3901 unjoint_samples 3900 joint_samples 10 [787997, 1031670]
processed_samples 3901 unjoint_samples 3900 joint_samples 11 [1036375, 248529]
processed_samples 3900 unjoint_samples 3900 joint_samples 10 [925752, 1048033]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1047888, 133116]
processed_samples 4000 unjoint_samples 4000 joint_samples 11 [384742, 1031969]
processed_samples 4000 unjoint_samples 4000 joint_samples 10 [1047451, 836466]
processed_samples 4001 unjoint_samples 4000 joint_samples 11 [1036375, 502369]
processed_samples 4000 unjoint_samples 4000 joint_samples 10 [1027819, 1032420]
processed_samples 4001 unjoint_samples 4000 joint_samples 11 [1046348, 287522]
processed_samples 4001 unjoint_samples 4000 joint_samples 10 [1045645, 1045545]
processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1043406, 431311]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4100 unjoint_samples 4100 joint_samples 11 [118378, 1047424]
processed_samples 4101 unjoint_samples 4100 joint_samples 11 [412400, 1045545]
processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1046364, 259602]
processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1047888, 412741]
processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1043406, 727427]
processed_samples 4100 unjoint_samples 4100 joint_samples 11 [717256, 1031969]
processed_samples 4101 unjoint_samples 4100 joint_samples 11 [1046348, 560468]
processed_samples 4101 unjoint_samples 4100 joint_samples 11 [1036375, 833960]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1046364, 559216]
processed_samples 4200 unjoint_samples 4200 joint_samples 11 [387548, 1047424]
processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1047888, 760034]
processed_samples 4201 unjoint_samples 4200 joint_samples 11 [761977, 1045545]
processed_samples 4201 unjoint_samples 4200 joint_samples 12 [1045710, 63157]
processed_samples 4201 unjoint_samples 4200 joint_samples 11 [1046348, 828850]
processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1043406, 1032158]
processed_samples 4200 unjoint_samples 4200 joint_samples 11 [955862, 1031969]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1046364, 840057]
processed_samples 4300 unjoint_samples 4300 joint_samples 12 [132317, 1047369]
processed_samples 4300 unjoint_samples 4300 joint_samples 12 [283311, 1040014]
processed_samples 4300 unjoint_samples 4300 joint_samples 12 [23511, 1047457]
processed_samples 4300 unjoint_samples 4300 joint_samples 11 [702287, 1047424]
processed_samples 4301 unjoint_samples 4300 joint_samples 12 [1046348, 12424]
processed_samples 4301 unjoint_samples 4300 joint_samples 12 [1045710, 284785]
processed_samples 4301 unjoint_samples 4300 joint_samples 11 [1025495, 1045545]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4400 unjoint_samples 4400 joint_samples 12 [173026, 1025460]
processed_samples 4400 unjoint_samples 4400 joint_samples 12 [594158, 1040014]
processed_samples 4400 unjoint_samples 4400 joint_samples 12 [400908, 1047369]
processed_samples 4400 unjoint_samples 4400 joint_samples 12 [373435, 1047457]
processed_samples 4401 unjoint_samples 4400 joint_samples 12 [1046348, 408242]
processed_samples 4401 unjoint_samples 4400 joint_samples 12 [1046314, 240106]
processed_samples 4400 unjoint_samples 4400 joint_samples 11 [1000719, 1047424]
processed_samples 4401 unjoint_samples 4400 joint_samples 12 [1045710, 581453]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4500 unjoint_samples 4500 joint_samples 12 [691727, 1047369]
processed_samples 4500 unjoint_samples 4500 joint_samples 12 [293772, 1048348]
processed_samples 4500 unjoint_samples 4500 joint_samples 12 [499306, 1025460]
processed_samples 4500 unjoint_samples 4500 joint_samples 12 [646236, 1047457]
processed_samples 4501 unjoint_samples 4500 joint_samples 12 [1046314, 579205]
processed_samples 4501 unjoint_samples 4500 joint_samples 12 [1046348, 769306]
processed_samples 4500 unjoint_samples 4500 joint_samples 12 [926135, 1040014]
processed_samples 4501 unjoint_samples 4500 joint_samples 12 [1045710, 908947]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4600 unjoint_samples 4600 joint_samples 12 [819651, 1025460]
processed_samples 4600 unjoint_samples 4600 joint_samples 12 [624895, 1048348]
processed_samples 4600 unjoint_samples 4600 joint_samples 13 [241424, 1046123]
processed_samples 4601 unjoint_samples 4600 joint_samples 13 [130432, 1045547]
processed_samples 4601 unjoint_samples 4600 joint_samples 12 [1046314, 863958]
processed_samples 4600 unjoint_samples 4600 joint_samples 12 [946177, 1047457]
processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1033717, 25762]
processed_samples 4601 unjoint_samples 4600 joint_samples 13 [309102, 1026781]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1033717, 334104]
processed_samples 4701 unjoint_samples 4700 joint_samples 13 [1046314, 60067]
processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1032842, 324986]
processed_samples 4700 unjoint_samples 4700 joint_samples 13 [623983, 1046123]
processed_samples 4700 unjoint_samples 4700 joint_samples 12 [870388, 1048348]
processed_samples 4701 unjoint_samples 4700 joint_samples 13 [364297, 1045547]
processed_samples 4701 unjoint_samples 4700 joint_samples 13 [581783, 1026781]
processed_samples 4700 unjoint_samples 4700 joint_samples 12 [1032486, 1031078]
(elided: repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings)
processed_samples 4801 unjoint_samples 4800 joint_samples 13 [775996, 1026781]
processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1047417, 83237]
processed_samples 4800 unjoint_samples 4800 joint_samples 13 [325728, 1042728]
processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1032842, 662191]
processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1033717, 659095]
processed_samples 4801 unjoint_samples 4800 joint_samples 13 [1046314, 388950]
processed_samples 4800 unjoint_samples 4800 joint_samples 13 [899880, 1046123]
processed_samples 4801 unjoint_samples 4800 joint_samples 13 [698407, 1045547]
[h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561287ddbd00] mmco: unref short failure processed_samples 4900 unjoint_samples 4900 joint_samples 13 [804416, 1042728] processed_samples 4900 unjoint_samples 4900 joint_samples 13 [804416, 1042728] processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1047417, 414360] processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1047417, 414360] processed_samples 4901 unjoint_samples 4900 joint_samples 13 [1046314, 719886] processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1032842, 930397] processed_samples 4901 unjoint_samples 4900 joint_samples 13 [1046314, 719886] processed_samples 4900 unjoint_samples 4900 joint_samples 14 [197651, 1046123] processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1033717, 982758] processed_samples 4900 unjoint_samples 4900 joint_samples 14 [197651, 1046123] processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1032842, 930397] processed_samples 4901 unjoint_samples 4900 joint_samples 14 [1024381, 134882] processed_samples 4901 unjoint_samples 4900 joint_samples 14 [1024381, 134882] processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1033717, 982758] processed_samples 4901 unjoint_samples 4900 joint_samples 13 [1010760, 1026781] processed_samples 4901 unjoint_samples 4900 joint_samples 13 [1010760, 1026781] [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref 
short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1040243, 55528] processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1040243, 55528] processed_samples 5000 unjoint_samples 5000 joint_samples 14 [140295, 1047590] processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1034114, 205267] processed_samples 5000 unjoint_samples 5000 joint_samples 14 [140295, 1047590] processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1034114, 205267] processed_samples 5000 unjoint_samples 5000 joint_samples 13 [1047417, 853174] processed_samples 5000 unjoint_samples 5000 joint_samples 13 [1047417, 853174] processed_samples 5000 unjoint_samples 5000 joint_samples 14 [470437, 1046123] processed_samples 5000 unjoint_samples 5000 joint_samples 14 [470437, 1046123] processed_samples 5001 unjoint_samples 5000 joint_samples 14 [1045692, 328955] processed_samples 5001 unjoint_samples 5000 joint_samples 14 [1045692, 328955] processed_samples 5001 unjoint_samples 5000 joint_samples 14 [1024381, 439406] processed_samples 5001 unjoint_samples 5000 joint_samples 14 [1024381, 439406] processed_samples 5001 unjoint_samples 5000 joint_samples 13 [1046314, 975542] processed_samples 5001 unjoint_samples 5000 joint_samples 13 [1046314, 975542] [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e2294557c0] mmco: unref short failure [h264 @ 0x55e2294557c0] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] 
mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e228c40900] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e228c40900] mmco: unref short failure [h264 @ 0x55e228c40900] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x561289adebc0] 
mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1040243, 385612] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1040243, 385612] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [720510, 1046123] processed_samples 5101 unjoint_samples 5100 joint_samples 14 [197323, 1032563] processed_samples 5101 unjoint_samples 5100 joint_samples 14 [197323, 1032563] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [720510, 1046123] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1047417, 193111] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1047417, 193111] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [372138, 1047590] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [372138, 1047590] processed_samples 5101 unjoint_samples 5100 joint_samples 14 [1024381, 843359] processed_samples 5101 unjoint_samples 5100 joint_samples 14 [1024381, 843359] processed_samples 5101 unjoint_samples 5100 joint_samples 14 [1045692, 608862] processed_samples 5101 unjoint_samples 5100 joint_samples 14 [1045692, 608862] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1034114, 527198] processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1034114, 527198] [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 
0x561289fe1340] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1040243, 663442] processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1040243, 663442] processed_samples 5201 unjoint_samples 5200 joint_samples 14 [454020, 1032563] processed_samples 5201 unjoint_samples 5200 joint_samples 14 [454020, 1032563] processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1046264, 86240] processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1046264, 86240] processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1034114, 880778] processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1034114, 880778] processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047417, 548957] processed_samples 5201 unjoint_samples 5200 joint_samples 15 [82380, 1047919] processed_samples 5201 unjoint_samples 5200 joint_samples 15 [82380, 1047919] processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047417, 548957] [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure processed_samples 5200 unjoint_samples 5200 joint_samples 14 [692394, 1047590] processed_samples 5200 unjoint_samples 5200 joint_samples 14 [692394, 1047590] processed_samples 5201 unjoint_samples 5200 joint_samples 14 [1045692, 961860] processed_samples 5201 unjoint_samples 5200 joint_samples 14 [1045692, 961860] [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure 
[h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure 
[h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e228c78c80] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure processed_samples 5300 unjoint_samples 5300 joint_samples 15 [221637, 1017264] processed_samples 5300 unjoint_samples 5300 joint_samples 15 [221637, 1017264] processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1046911, 83572] processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1046911, 83572] [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1046264, 430735] processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1046264, 430735] processed_samples 5301 unjoint_samples 5300 joint_samples 15 [199556, 1047470] processed_samples 5301 unjoint_samples 5300 joint_samples 15 [199556, 1047470] processed_samples 5301 unjoint_samples 5300 joint_samples 15 [556721, 1047919] processed_samples 5301 unjoint_samples 5300 joint_samples 14 [770924, 1032563] processed_samples 5301 unjoint_samples 5300 joint_samples 15 [556721, 1047919] processed_samples 5301 unjoint_samples 5300 joint_samples 14 [770924, 1032563] processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1047417, 799171] processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1047417, 799171] processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1040243, 912174] processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1040243, 912174] [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short 
failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x56128a1362c0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure processed_samples 5400 unjoint_samples 5400 joint_samples 15 [560876, 1017264] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1040243, 315403] [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref 
short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure processed_samples 5400 unjoint_samples 5400 joint_samples 15 [560876, 1017264] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [18880, 1045088] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1040243, 315403] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [18880, 1045088] processed_samples 5401 unjoint_samples 5400 joint_samples 15 [822089, 1047919] processed_samples 5401 unjoint_samples 5400 joint_samples 15 [822089, 1047919] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1046911, 314945] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1046911, 314945] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1046264, 694398] processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1046264, 694398] processed_samples 5401 unjoint_samples 5400 joint_samples 14 [1038164, 1039775] processed_samples 5401 unjoint_samples 5400 joint_samples 14 [1038164, 1039775] processed_samples 5401 unjoint_samples 5400 joint_samples 15 [475680, 1047470] processed_samples 5401 unjoint_samples 5400 joint_samples 15 [475680, 1047470] [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: 
unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1040243, 587785] processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1040243, 587785] processed_samples 5500 unjoint_samples 5500 joint_samples 16 [173769, 962546] processed_samples 5500 unjoint_samples 5500 joint_samples 16 [173769, 962546] processed_samples 5500 unjoint_samples 5500 joint_samples 15 [307240, 1045088] processed_samples 5501 unjoint_samples 5500 joint_samples 16 [1025105, 269288] processed_samples 5501 unjoint_samples 5500 joint_samples 16 [1025105, 269288] [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure processed_samples 5500 unjoint_samples 5500 joint_samples 15 [814046, 1017264] processed_samples 5500 unjoint_samples 5500 joint_samples 15 [307240, 1045088] processed_samples 5500 unjoint_samples 5500 joint_samples 15 [814046, 1017264] processed_samples 5501 unjoint_samples 5500 joint_samples 15 [1040194, 373227] processed_samples 5501 unjoint_samples 5500 joint_samples 15 [1040194, 373227] processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1046911, 593782] processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1046911, 593782] processed_samples 5501 unjoint_samples 5500 joint_samples 15 [777243, 1047470] processed_samples 5501 unjoint_samples 5500 joint_samples 15 [777243, 1047470] [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 
0x56128a101380] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure processed_samples 5600 unjoint_samples 5600 joint_samples 15 [537838, 1045088] processed_samples 5600 unjoint_samples 5600 joint_samples 16 [36613, 1033165] processed_samples 5600 unjoint_samples 5600 joint_samples 15 [537838, 1045088] processed_samples 5600 unjoint_samples 5600 joint_samples 16 [36613, 1033165] [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure processed_samples 5601 unjoint_samples 5600 joint_samples 16 [1025105, 560114] processed_samples 5601 unjoint_samples 5600 joint_samples 16 [1025105, 560114] processed_samples 5600 unjoint_samples 5600 joint_samples 16 [508611, 962546] processed_samples 5600 unjoint_samples 5600 joint_samples 16 [702968, 703081] processed_samples 5600 unjoint_samples 5600 joint_samples 16 [508611, 962546] 
processed_samples 5600 unjoint_samples 5600 joint_samples 16 [702968, 703081] processed_samples 5601 unjoint_samples 5600 joint_samples 15 [1040194, 616484] processed_samples 5601 unjoint_samples 5600 joint_samples 15 [1040194, 616484] processed_samples 5600 unjoint_samples 5600 joint_samples 15 [1046911, 873815] processed_samples 5600 unjoint_samples 5600 joint_samples 15 [1046911, 873815] processed_samples 5601 unjoint_samples 5600 joint_samples 15 [1034890, 1047470] processed_samples 5601 unjoint_samples 5600 joint_samples 15 [1034890, 1047470] [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure 
processed_samples 5700 unjoint_samples 5700 joint_samples 16 [163191, 1046149] processed_samples 5700 unjoint_samples 5700 joint_samples 16 [293044, 1033165] processed_samples 5700 unjoint_samples 5700 joint_samples 16 [856542, 962546] processed_samples 5701 unjoint_samples 5700 joint_samples 15 [1040194, 1004110] processed_samples 5701 unjoint_samples 5700 joint_samples 16 [1045259, 289483] processed_samples 5701 unjoint_samples 5700 joint_samples 16 [1025105, 801855] processed_samples 5700 unjoint_samples 5700 joint_samples 16 [163191, 1046149] processed_samples 5700 unjoint_samples 5700 joint_samples 16 [825983, 825372] processed_samples 5700 unjoint_samples 5700 joint_samples 16 [293044, 1033165] processed_samples 5700 unjoint_samples 5700 joint_samples 15 [804426, 1045088] processed_samples 5700 unjoint_samples 5700 joint_samples 16 [856542, 962546] processed_samples 5701 unjoint_samples 5700 joint_samples 15 [1040194, 1004110] processed_samples 5701 unjoint_samples 5700 joint_samples 16 [1025105, 801855] processed_samples 5700 unjoint_samples 5700 joint_samples 15 [804426, 1045088] processed_samples 5701 unjoint_samples 5700 joint_samples 16 [1045259, 289483] processed_samples 5700 unjoint_samples 5700 joint_samples 16 [825983, 825372] [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short 
failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure processed_samples 5800 unjoint_samples 5800 joint_samples 16 [461254, 1046149] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1011245, 110366] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [570249, 1033165] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [953087, 952946] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [461254, 1046149] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1011245, 110366] processed_samples 5801 unjoint_samples 5800 joint_samples 16 [259848, 1043929] processed_samples 5801 unjoint_samples 5800 joint_samples 16 [259848, 1043929] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [570249, 1033165] processed_samples 5801 unjoint_samples 5800 joint_samples 17 [1037924, 101584] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1035685, 1035382] processed_samples 5801 unjoint_samples 5800 joint_samples 16 [1045259, 555109] processed_samples 5801 unjoint_samples 5800 joint_samples 17 [1037924, 101584] processed_samples 5801 unjoint_samples 5800 joint_samples 16 [1045259, 555109] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1035685, 1035382] processed_samples 5800 unjoint_samples 5800 joint_samples 16 [953087, 952946] [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x561288fff700] mmco: 
unref short failure
[h264] mmco: unref short failure (FFmpeg H.264 decoder warning, repeated across multiple decoder instances)
processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1038820, 170495]
processed_samples 5901 unjoint_samples 5900 joint_samples 17 [1037924, 440119]
processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1037111, 281291]
processed_samples 5900 unjoint_samples 5900 joint_samples 16 [1011245, 562301]
processed_samples 5901 unjoint_samples 5900 joint_samples 16 [592573, 1043929]
processed_samples 5900 unjoint_samples 5900 joint_samples 16 [722453, 1046149]
processed_samples 5900 unjoint_samples 5900 joint_samples 16 [907183, 1033165]
processed_samples 5901 unjoint_samples 5900 joint_samples 16 [1045259, 982406]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1038820, 509931]
processed_samples 6000 unjoint_samples 6000 joint_samples 17 [120314, 1047747]
processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1046898, 162176]
processed_samples 6000 unjoint_samples 6000 joint_samples 16 [1011245, 952509]
processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1037111, 591550]
processed_samples 6001 unjoint_samples 6000 joint_samples 17 [229585, 1038497]
processed_samples 6001 unjoint_samples 6000 joint_samples 17 [1037924, 781606]
processed_samples 6001 unjoint_samples 6000 joint_samples 16 [1043971, 1045549]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1038820, 992025]
processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1046898, 462620]
processed_samples 6100 unjoint_samples 6100 joint_samples 17 [238785, 1029586]
processed_samples 6101 unjoint_samples 6100 joint_samples 18 [56410, 1048142]
processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1037111, 866499]
processed_samples 6100 unjoint_samples 6100 joint_samples 17 [542996, 1047747]
processed_samples 6101 unjoint_samples 6100 joint_samples 17 [1046222, 364815]
processed_samples 6101 unjoint_samples 6100 joint_samples 17 [497655, 1038497]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6200 unjoint_samples 6200 joint_samples 18 [1046524, 297577]
processed_samples 6200 unjoint_samples 6200 joint_samples 17 [603617, 1029586]
processed_samples 6201 unjoint_samples 6200 joint_samples 18 [402816, 1048142]
processed_samples 6200 unjoint_samples 6200 joint_samples 18 [186267, 1047371]
processed_samples 6201 unjoint_samples 6200 joint_samples 17 [1046222, 715581]
processed_samples 6200 unjoint_samples 6200 joint_samples 17 [922597, 1047747]
processed_samples 6200 unjoint_samples 6200 joint_samples 17 [1046898, 784630]
processed_samples 6201 unjoint_samples 6200 joint_samples 17 [750494, 1038497]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1046524, 634648]
processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1045624, 258504]
processed_samples 6300 unjoint_samples 6300 joint_samples 18 [509650, 1047371]
processed_samples 6301 unjoint_samples 6300 joint_samples 18 [1047479, 21624]
processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1046898, 145656]
processed_samples 6301 unjoint_samples 6300 joint_samples 18 [776181, 1048142]
processed_samples 6300 unjoint_samples 6300 joint_samples 17 [859924, 1029586]
processed_samples 6301 unjoint_samples 6300 joint_samples 18 [1046540, 4719]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6401 unjoint_samples 6400 joint_samples 18 [1046540, 225548]
processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1045624, 676672]
processed_samples 6400 unjoint_samples 6400 joint_samples 18 [143802, 1032535]
processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1046898, 517482]
processed_samples 6400 unjoint_samples 6400 joint_samples 18 [851074, 1047371]
processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1046524, 973528]
processed_samples 6401 unjoint_samples 6400 joint_samples 19 [1043523, 9824]
processed_samples 6401 unjoint_samples 6400 joint_samples 18 [1047479, 323111]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6500 unjoint_samples 6500 joint_samples 19 [1046524, 184358]
processed_samples 6500 unjoint_samples 6500 joint_samples 18 [451798, 1032535]
processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1045624, 1040641]
processed_samples 6501 unjoint_samples 6500 joint_samples 19 [1043523, 309209]
processed_samples 6501 unjoint_samples 6500 joint_samples 18 [1046540, 557508]
processed_samples 6500 unjoint_samples 6500 joint_samples 19 [1045106, 170940]
processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1046898, 814780]
processed_samples 6501 unjoint_samples 6500 joint_samples 18 [1047479, 532863]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6600 unjoint_samples 6600 joint_samples 19 [234141, 1042947]
processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1047097, 53572]
processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1045106, 580223]
processed_samples 6601 unjoint_samples 6600 joint_samples 19 [1043523, 668957]
processed_samples 6600 unjoint_samples 6600 joint_samples 18 [741007, 1032535]
processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1046524, 451503]
processed_samples 6601 unjoint_samples 6600 joint_samples 18 [1046540, 832157]
processed_samples 6601 unjoint_samples 6600 joint_samples 18 [1047479, 890685]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1046524, 732199]
processed_samples 6700 unjoint_samples 6700 joint_samples 19 [504443, 1042947]
processed_samples 6700 unjoint_samples 6700 joint_samples 19 [33537, 1034796]
processed_samples 6701 unjoint_samples 6700 joint_samples 19 [1046540, 135775]
processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1045106, 872680]
processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1047097, 388127]
processed_samples 6701 unjoint_samples 6700 joint_samples 19 [1043523, 973813]
processed_samples 6701 unjoint_samples 6700 joint_samples 19 [118045, 1039439]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6800 unjoint_samples 6800 joint_samples 20 [33655, 1040571]
processed_samples 6800 unjoint_samples 6800 joint_samples 19 [325435, 1034796]
processed_samples 6800 unjoint_samples 6800 joint_samples 19 [786586, 1042947]
processed_samples 6800 unjoint_samples 6800 joint_samples 20 [188679, 1005774]
processed_samples 6801 unjoint_samples 6800 joint_samples 19 [1046540, 515109]
processed_samples 6801 unjoint_samples 6800 joint_samples 20 [1043523, 259074]
processed_samples 6801 unjoint_samples 6800 joint_samples 19 [494241, 1039439]
processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1047097, 891407]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
processed_samples 6900 unjoint_samples 6900 joint_samples 20 [68071, 1047201]
processed_samples 6900 unjoint_samples 6900 joint_samples 20 [727109, 1005774]
processed_samples 6900 unjoint_samples 6900 joint_samples 19 [619546, 1034796]
processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1047097, 94553]
processed_samples 6900 unjoint_samples 6900 joint_samples 20 [311525, 1040571]
processed_samples 6901 unjoint_samples 6900 joint_samples 19 [1046540, 833660]
processed_samples 6901 unjoint_samples 6900 joint_samples 20 [1043523, 593430]
processed_samples 6901 unjoint_samples 6900 joint_samples 19 [847989, 1039439]
[h264] mmco: unref short failure (repeated across multiple decoder instances)
[h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure processed_samples 7000 unjoint_samples 7000 joint_samples 20 [552028, 1040571] processed_samples 7000 unjoint_samples 7000 joint_samples 20 [437063, 1047201] processed_samples 7000 unjoint_samples 7000 joint_samples 20 [552028, 1040571] processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1047097, 459009] processed_samples 7000 unjoint_samples 7000 joint_samples 20 [437063, 1047201] processed_samples 7000 unjoint_samples 7000 joint_samples 19 [970102, 1034796] processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1047097, 459009] processed_samples 7000 unjoint_samples 7000 joint_samples 19 [970102, 1034796] processed_samples 7000 unjoint_samples 7000 joint_samples 21 [1038602, 31367] processed_samples 7000 unjoint_samples 7000 joint_samples 21 [1038602, 31367] processed_samples 7001 unjoint_samples 7000 joint_samples 20 [156728, 1005199] processed_samples 7001 unjoint_samples 7000 joint_samples 20 [156728, 1005199] 
processed_samples 7001 unjoint_samples 7000 joint_samples 20 [153017, 1039439]
processed_samples 7001 unjoint_samples 7000 joint_samples 20 [1043523, 953917]
[interleaved FFmpeg h264 decoder warnings, repeated many times from multiple decoder contexts throughout this stretch: "[h264 @ 0x…] mmco: unref short failure"]
processed_samples 7100 unjoint_samples 7100 joint_samples 21 [1038602, 325126]
processed_samples 7100 unjoint_samples 7100 joint_samples 20 [275192, 1034796]
processed_samples 7100 unjoint_samples 7100 joint_samples 20 [840012, 1047201]
processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1047097, 862064]
processed_samples 7100 unjoint_samples 7100 joint_samples 20 [875795, 1040571]
processed_samples 7101 unjoint_samples 7100 joint_samples 21 [1047908, 125165]
processed_samples 7101 unjoint_samples 7100 joint_samples 20 [483793, 1005199]
processed_samples 7101 unjoint_samples 7100 joint_samples 20 [403533, 1039439]
processed_samples 7200 unjoint_samples 7200 joint_samples 21 [1047121, 82959]
processed_samples 7200 unjoint_samples 7200 joint_samples 21 [166838, 1044425]
processed_samples 7200 unjoint_samples 7200 joint_samples 21 [237138, 1046758]
processed_samples 7200 unjoint_samples 7200 joint_samples 20 [701927, 1034796]
processed_samples 7200 unjoint_samples 7200 joint_samples 21 [1038602, 635465]
processed_samples 7201 unjoint_samples 7200 joint_samples 20 [720715, 1005199]
processed_samples 7201 unjoint_samples 7200 joint_samples 21 [1047908, 450098]
processed_samples 7201 unjoint_samples 7200 joint_samples 20 [742086, 1039439]
processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1047121, 404966]
processed_samples 7300 unjoint_samples 7300 joint_samples 21 [572731, 1046758]
processed_samples 7300 unjoint_samples 7300 joint_samples 21 [581886, 1044425]
processed_samples 7300 unjoint_samples 7300 joint_samples 20 [966011, 1034796]
processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1038602, 1001113]
processed_samples 7301 unjoint_samples 7300 joint_samples 21 [1047908, 912494]
processed_samples 7301 unjoint_samples 7300 joint_samples 21 [1016533, 262948]
processed_samples 7301 unjoint_samples 7300 joint_samples 20 [1008497, 1009380]
processed_samples 7400 unjoint_samples 7400 joint_samples 22 [1047073, 277004]
processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1033051, 207594]
processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1047121, 738181]
processed_samples 7400 unjoint_samples 7400 joint_samples 21 [933485, 1044425]
processed_samples 7400 unjoint_samples 7400 joint_samples 21 [823093, 1046758]
processed_samples 7401 unjoint_samples 7400 joint_samples 21 [255402, 1046459]
processed_samples 7401 unjoint_samples 7400 joint_samples 22 [240061, 1047717]
processed_samples 7401 unjoint_samples 7400 joint_samples 21 [1016533, 531694]
processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1047240, 57802]
processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1046048, 225803]
processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1033051, 551260]
processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1047896, 157963]
processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1047073, 544071]
processed_samples 7501 unjoint_samples 7500 joint_samples 22 [521580, 1047717]
processed_samples 7501 unjoint_samples 7500 joint_samples 21 [548343, 1046459]
processed_samples 7501 unjoint_samples 7500 joint_samples 21 [1016533, 822762]
processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1047240, 370710]
processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1046048, 493169]
processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1047896, 430621]
processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1047073, 817893]
processed_samples 7600 unjoint_samples 7600 joint_samples 21 [1033051, 874973]
processed_samples 7601 unjoint_samples 7600 joint_samples 21 [1014005, 1046459]
processed_samples 7601 unjoint_samples 7600 joint_samples 22 [87746, 1044171]
processed_samples 7601 unjoint_samples 7600 joint_samples 22 [849671, 1047717]
processed_samples 7700 unjoint_samples 7700 joint_samples 23 [1047073, 63410]
processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1047240, 670549]
processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1047896, 812003]
processed_samples 7700 unjoint_samples 7700 joint_samples 22 [182331, 1043053]
processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1046048, 852046]
processed_samples 7701 unjoint_samples 7700 joint_samples 22 [346031, 1046459]
processed_samples 7701 unjoint_samples 7700 joint_samples 22 [339559, 1044171]
processed_samples 7701 unjoint_samples 7700 joint_samples 23 [1046971, 120673]
processed_samples 7800 unjoint_samples 7800 joint_samples 22 [473571, 1043053]
processed_samples 7800 unjoint_samples 7800 joint_samples 23 [100241, 1047174]
processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1047240, 987903]
processed_samples 7800 unjoint_samples 7800 joint_samples 23 [1047073, 414208]
processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1047896, 1004907]
processed_samples 7801 unjoint_samples 7800 joint_samples 22 [733646, 1046459]
processed_samples 7801 unjoint_samples 7800 joint_samples 23 [1046971, 527099]
processed_samples 7801 unjoint_samples 7800 joint_samples 22 [761894, 1044171]
processed_samples 7900 unjoint_samples 7900 joint_samples 23 [271044, 1047652]
processed_samples 7900 unjoint_samples 7900 joint_samples 23 [319446, 1045711]
processed_samples 7900 unjoint_samples 7900 joint_samples 23 [433159, 1047174]
processed_samples 7900 unjoint_samples 7900 joint_samples 23 [1047073, 659087]
processed_samples 7900 unjoint_samples 7900 joint_samples 22 [762043, 1043053]
processed_samples 7901 unjoint_samples 7900 joint_samples 22 [970934, 1046459]
processed_samples 7901 unjoint_samples 7900 joint_samples 23 [1047960, 51823]
processed_samples 7901 unjoint_samples 7900 joint_samples 23 [1046971, 1014235]
processed_samples 8000 unjoint_samples 8000 joint_samples 23 [88132, 1046525]
processed_samples 8000 unjoint_samples 8000 joint_samples 23 [609474, 1045711]
processed_samples 8000 unjoint_samples 8000 joint_samples 23 [839172, 1047174]
processed_samples 8000 unjoint_samples 8000 joint_samples 23 [565171, 1047652]
processed_samples 8000 unjoint_samples 8000 joint_samples 23 [1047073, 912762]
processed_samples 8001 unjoint_samples 8000 joint_samples 23 [227203, 1046459]
processed_samples 8001 unjoint_samples 8000 joint_samples 24 [280281, 1030767]
processed_samples 8001 unjoint_samples 8000 joint_samples 23 [1047960, 376357]
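The same progress line often appears more than once in the raw capture, presumably because more than one rank reports identical counters, so a condensed view of the loader's progress can be rebuilt directly from the original log. A minimal sketch, assuming the capture lives in a file named log_node26.txt (an illustrative placeholder, not a path taken from this run):

# Keep the first occurrence of each distinct data-loader progress line, in order of appearance.
grep -Eo 'processed_samples [0-9]+ unjoint_samples [0-9]+ joint_samples [0-9]+ \[[0-9]+, [0-9]+\]' log_node26.txt \
    | awk '!seen[$0]++'

# Report the highest processed_samples counter reached so far.
grep -Eo 'processed_samples [0-9]+' log_node26.txt | awk '{print $2}' | sort -n | tail -1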
[interleaved FFmpeg h264 decoder warnings, repeated many times from multiple decoder contexts throughout this stretch: "[h264 @ 0x…] mmco: unref short failure"]
processed_samples 8100 unjoint_samples 8100 joint_samples 24 [36856, 1047174]
processed_samples 8100 unjoint_samples 8100 joint_samples 23 [426981, 1046525]
processed_samples 8100 unjoint_samples 8100 joint_samples 24 [190304, 1046137]
processed_samples 8100 unjoint_samples 8100 joint_samples 23 [1027966, 1045711]
processed_samples 8100 unjoint_samples 8100 joint_samples 23 [829470, 1047652]
processed_samples 8101 unjoint_samples 8100 joint_samples 24 [556361, 1030767]
processed_samples 8101 unjoint_samples 8100 joint_samples 23 [521363, 1046459]
processed_samples 8101 unjoint_samples 8100 joint_samples 23 [1047960, 644193]
processed_samples 8200 unjoint_samples 8200 joint_samples 24 [1042457, 88864]
processed_samples 8200 unjoint_samples 8200 joint_samples 24 [406898, 1047174]
processed_samples 8200 unjoint_samples 8200 joint_samples 24 [208185, 1045711]
processed_samples 8200 unjoint_samples 8200 joint_samples 23 [739194, 1046525]
processed_samples 8200 unjoint_samples 8200 joint_samples 24 [495272, 1046137]
processed_samples 8201 unjoint_samples 8200 joint_samples 23 [851318, 1046459]
processed_samples 8201 unjoint_samples 8200 joint_samples 24 [954188, 1030767]
processed_samples 8201 unjoint_samples 8200 joint_samples 23 [1047960, 944706]
[h264 @ 0x56128803b9c0]
mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x5612891da640] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x561287f63080] mmco: unref short failure processed_samples 8300 unjoint_samples 8300 joint_samples 24 [427668, 1045711] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [631780, 1047174] processed_samples 8301 unjoint_samples 8300 joint_samples 24 [48022, 1046459] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [873346, 1046137] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1042457, 470774] processed_samples 8301 unjoint_samples 8300 joint_samples 25 [1027126, 257407] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [631780, 1047174] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [427668, 1045711] processed_samples 8301 unjoint_samples 8300 joint_samples 24 [48022, 1046459] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1042457, 470774] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [873346, 1046137] processed_samples 8301 unjoint_samples 8300 joint_samples 25 
[1027126, 257407] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1048317, 4239] processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1048317, 4239] processed_samples 8301 unjoint_samples 8300 joint_samples 24 [162100, 1048097] processed_samples 8301 unjoint_samples 8300 joint_samples 24 [162100, 1048097] [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] 
mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1048317, 364966] processed_samples 8400 unjoint_samples 8400 joint_samples 25 [1046824, 156931] [h264 @ 0x55e228b87a80] mmco: unref short failure processed_samples 8401 unjoint_samples 8400 joint_samples 24 [401596, 1046459] processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1042457, 813759] processed_samples 8400 unjoint_samples 8400 joint_samples 24 [696937, 1045711] processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1048317, 364966] processed_samples 8400 unjoint_samples 8400 joint_samples 25 [1046824, 156931] [h264 @ 0x56128a0be8c0] mmco: unref short failure processed_samples 8401 unjoint_samples 8400 joint_samples 25 [1027126, 539602] processed_samples 8401 unjoint_samples 8400 joint_samples 24 [483812, 1048097] processed_samples 8400 unjoint_samples 8400 joint_samples 24 [960507, 1047174] [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure processed_samples 8401 unjoint_samples 8400 joint_samples 24 [401596, 1046459] processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1042457, 813759] [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure processed_samples 8400 unjoint_samples 8400 joint_samples 24 [696937, 1045711] [h264 @ 0x55e22aae1740] mmco: unref short failure processed_samples 8401 unjoint_samples 8400 joint_samples 24 [483812, 1048097] processed_samples 8401 unjoint_samples 8400 joint_samples 25 [1027126, 539602] processed_samples 8400 unjoint_samples 8400 joint_samples 24 [960507, 1047174] [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 
0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure processed_samples 8500 unjoint_samples 8500 joint_samples 25 [530803, 1047174] processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1048317, 684674] processed_samples 8501 unjoint_samples 8500 joint_samples 24 [671596, 1046459] processed_samples 8500 unjoint_samples 8500 joint_samples 25 [1046824, 433695] processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1024269, 1045711] processed_samples 8501 unjoint_samples 8500 joint_samples 25 [1027126, 848355] [h264 @ 0x561289ff4fc0] mmco: unref short failure processed_samples 8500 unjoint_samples 8500 joint_samples 25 [22640, 1043816] processed_samples 8500 unjoint_samples 8500 joint_samples 25 [530803, 1047174] processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1048317, 684674] processed_samples 8501 unjoint_samples 8500 joint_samples 24 [671596, 1046459] processed_samples 8500 unjoint_samples 8500 joint_samples 25 [1046824, 433695] processed_samples 8501 unjoint_samples 8500 joint_samples 25 [1027126, 848355] processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1024269, 1045711] processed_samples 8500 unjoint_samples 8500 joint_samples 25 [22640, 1043816] processed_samples 8501 unjoint_samples 8500 joint_samples 24 [887065, 1048097] processed_samples 8501 unjoint_samples 8500 joint_samples 24 [887065, 1048097] [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short 
failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short 
failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228bdaac0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure processed_samples 8600 unjoint_samples 8600 joint_samples 25 [873784, 1047174] processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1045194, 436737] processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1045194, 436737] processed_samples 8600 unjoint_samples 8600 joint_samples 25 [873784, 1047174] processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1046824, 727869] processed_samples 8601 unjoint_samples 8600 joint_samples 26 [250860, 1045913] processed_samples 8600 unjoint_samples 8600 joint_samples 25 [329002, 1043816] processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1046824, 727869] processed_samples 8601 unjoint_samples 8600 joint_samples 26 [250860, 1045913] processed_samples 8601 unjoint_samples 8600 joint_samples 25 [1015537, 121801] processed_samples 8600 unjoint_samples 8600 joint_samples 25 [329002, 1043816] processed_samples 8600 unjoint_samples 8600 joint_samples 24 [1048317, 1010357] processed_samples 8601 unjoint_samples 8600 joint_samples 24 [1006020, 1046459] processed_samples 8601 unjoint_samples 8600 joint_samples 24 [1006020, 1046459] processed_samples 8601 unjoint_samples 8600 joint_samples 25 [1015537, 121801] processed_samples 8600 unjoint_samples 8600 joint_samples 24 [1048317, 1010357] [h264 @ 0x55e228fe45c0] 
mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x55e228bdaac0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] 
mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure processed_samples 8700 unjoint_samples 8700 joint_samples 26 [1040234, 208177] processed_samples 8700 unjoint_samples 8700 joint_samples 25 [231437, 1046467] processed_samples 8701 unjoint_samples 8700 joint_samples 25 [230059, 1046459] processed_samples 8700 unjoint_samples 8700 joint_samples 25 [541928, 1043816] processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1046824, 1023633] processed_samples 8701 unjoint_samples 8700 joint_samples 25 [1015537, 381069] processed_samples 8701 unjoint_samples 8700 joint_samples 26 [540160, 1045913] [h264 @ 0x56128a018cc0] mmco: unref short failure processed_samples 8700 unjoint_samples 8700 joint_samples 25 [231437, 1046467] processed_samples 8700 unjoint_samples 8700 joint_samples 26 [1040234, 208177] processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1045194, 672413] processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1045194, 672413] processed_samples 8701 unjoint_samples 8700 joint_samples 25 [230059, 1046459] processed_samples 8700 unjoint_samples 8700 joint_samples 25 [541928, 1043816] processed_samples 8701 unjoint_samples 8700 joint_samples 26 [540160, 1045913] [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure processed_samples 8701 unjoint_samples 8700 joint_samples 25 [1015537, 381069] processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1046824, 1023633] [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e228bdaac0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 
0x55e229effb00] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1045194, 946679] [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1040234, 476936] processed_samples 8800 unjoint_samples 8800 joint_samples 25 [572483, 1046467] processed_samples 8801 unjoint_samples 8800 joint_samples 25 [499084, 1046459] [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure processed_samples 8801 unjoint_samples 8800 joint_samples 26 [881401, 1045913] processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1047409, 269889] processed_samples 8801 unjoint_samples 8800 joint_samples 25 [1015537, 674972] 
processed_samples 8800 unjoint_samples 8800 joint_samples 25 [894112, 1043816] [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1045194, 946679] [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1040234, 476936] processed_samples 8800 unjoint_samples 8800 joint_samples 25 [572483, 1046467] [h264 @ 0x55e22a4b04c0] mmco: unref short failure processed_samples 8801 unjoint_samples 8800 joint_samples 25 [499084, 1046459] processed_samples 8801 unjoint_samples 8800 joint_samples 26 [881401, 1045913] processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1047409, 269889] processed_samples 8801 unjoint_samples 8800 joint_samples 25 [1015537, 674972] [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure processed_samples 8800 unjoint_samples 8800 joint_samples 25 [894112, 1043816] [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short 
failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1046550, 70190] processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1046550, 70190] processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1040234, 843035] processed_samples 8900 unjoint_samples 8900 joint_samples 26 [277500, 1025202] processed_samples 8900 unjoint_samples 8900 joint_samples 26 [277500, 1025202] processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1040234, 843035] processed_samples 8901 unjoint_samples 8900 joint_samples 27 [1010873, 184629] processed_samples 8901 unjoint_samples 8900 joint_samples 27 [1010873, 184629] processed_samples 8900 unjoint_samples 8900 joint_samples 25 [861670, 1046467] processed_samples 8901 unjoint_samples 8900 joint_samples 25 [865491, 1046459] processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1047409, 526108] processed_samples 8901 unjoint_samples 8900 joint_samples 25 [1015537, 990470] processed_samples 8900 unjoint_samples 8900 joint_samples 25 [861670, 1046467] processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1047409, 526108] processed_samples 8901 unjoint_samples 8900 joint_samples 25 [865491, 1046459] processed_samples 8901 unjoint_samples 8900 joint_samples 25 [1015537, 990470] [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: 
unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e2298ad5c0] mmco: unref short failure [h264 @ 0x55e2298ad5c0] mmco: unref short failure processed_samples 9000 unjoint_samples 9000 joint_samples 26 [614454, 1025202] processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047379, 154889] processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1043811, 113896] processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1046550, 326611] processed_samples 9001 unjoint_samples 9000 joint_samples 27 [1010873, 522400] processed_samples 9001 unjoint_samples 9000 joint_samples 26 [227420, 1042202] processed_samples 9001 unjoint_samples 9000 joint_samples 26 [1045319, 150890] processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047409, 969190] processed_samples 9000 unjoint_samples 9000 joint_samples 26 [614454, 1025202] processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1043811, 113896] processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047379, 154889] processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1046550, 326611] processed_samples 9001 unjoint_samples 9000 joint_samples 27 [1010873, 522400] processed_samples 9001 unjoint_samples 9000 joint_samples 26 [227420, 1042202] processed_samples 9001 unjoint_samples 9000 joint_samples 26 [1045319, 150890] processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047409, 969190] [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e229c73a40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e229c73a40] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 
0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e229c73a40] mmco: unref short failure [h264 @ 0x55e229c73a40] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e228c40900] mmco: unref short failure [h264 @ 0x55e228c40900] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1047379, 500697] processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1043811, 398530] processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1047409, 229329] [h264 @ 0x55e22d0028c0] mmco: unref short failure processed_samples 9101 unjoint_samples 9100 joint_samples 26 [1045319, 404284] processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1046550, 690754] processed_samples 9101 unjoint_samples 9100 joint_samples 26 [676557, 1042202] processed_samples 9100 unjoint_samples 9100 joint_samples 26 [942975, 1025202] processed_samples 9101 unjoint_samples 9100 joint_samples 27 [1010873, 812383] [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1047379, 500697] processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1043811, 398530] [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1047409, 229329] processed_samples 9101 unjoint_samples 9100 joint_samples 26 [1045319, 404284] [h264 @ 0x55e22ad7b180] mmco: unref short failure processed_samples 9101 unjoint_samples 9100 joint_samples 27 [1010873, 812383] processed_samples 9100 unjoint_samples 9100 joint_samples 26 [942975, 1025202] processed_samples 9101 unjoint_samples 9100 joint_samples 26 [676557, 1042202] processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1046550, 690754] [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short 
failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22a882580] mmco: unref short failure [h264 @ 0x55e22a882580] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure processed_samples 9200 unjoint_samples 9200 joint_samples 26 [1047379, 846054] processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1018313, 223446] processed_samples 9200 unjoint_samples 
9200 joint_samples 26 [1047379, 846054] processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1043811, 660074] processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1018313, 223446] processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1043811, 660074] processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047409, 526528] processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047409, 526528] processed_samples 9201 unjoint_samples 9200 joint_samples 26 [1045319, 685846] processed_samples 9200 unjoint_samples 9200 joint_samples 26 [1046550, 947469] [h264 @ 0x55e2293f4500] mmco: unref short failure processed_samples 9201 unjoint_samples 9200 joint_samples 26 [1045319, 685846] [h264 @ 0x561289115f80] mmco: unref short failure processed_samples 9201 unjoint_samples 9200 joint_samples 27 [1027729, 1027945] processed_samples 9200 unjoint_samples 9200 joint_samples 26 [1046550, 947469] processed_samples 9201 unjoint_samples 9200 joint_samples 27 [1027729, 1027945] processed_samples 9201 unjoint_samples 9200 joint_samples 26 [918761, 1042202] processed_samples 9201 unjoint_samples 9200 joint_samples 26 [918761, 1042202] [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref 
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1046550, 140756]
processed_samples 9300 unjoint_samples 9300 joint_samples 27 [26001, 1044298]
processed_samples 9301 unjoint_samples 9300 joint_samples 26 [1045319, 882157]
processed_samples 9301 unjoint_samples 9300 joint_samples 27 [1047356, 137958]
processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1043811, 916399]
processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1018313, 630691]
processed_samples 9301 unjoint_samples 9300 joint_samples 28 [387656, 1039736]
processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1047409, 807244]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 9400 unjoint_samples 9400 joint_samples 28 [297590, 844424]
processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1046281, 204458]
processed_samples 9400 unjoint_samples 9400 joint_samples 27 [377095, 1044298]
processed_samples 9401 unjoint_samples 9400 joint_samples 27 [233078, 1032295]
processed_samples 9401 unjoint_samples 9400 joint_samples 28 [711710, 1039736]
processed_samples 9400 unjoint_samples 9400 joint_samples 28 [47541, 1047245]
processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1046550, 432816]
processed_samples 9401 unjoint_samples 9400 joint_samples 27 [1047356, 483902]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1046281, 489322]
processed_samples 9500 unjoint_samples 9500 joint_samples 28 [336229, 1047245]
processed_samples 9500 unjoint_samples 9500 joint_samples 27 [726315, 1044298]
processed_samples 9500 unjoint_samples 9500 joint_samples 27 [1046550, 697527]
processed_samples 9501 unjoint_samples 9500 joint_samples 27 [590451, 1032295]
processed_samples 9500 unjoint_samples 9500 joint_samples 28 [750473, 844424]
processed_samples 9501 unjoint_samples 9500 joint_samples 27 [1047356, 893557]
processed_samples 9501 unjoint_samples 9500 joint_samples 28 [1043703, 1043134]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 9600 unjoint_samples 9600 joint_samples 27 [987752, 1044298]
processed_samples 9601 unjoint_samples 9600 joint_samples 29 [1046957, 267189]
processed_samples 9600 unjoint_samples 9600 joint_samples 28 [574671, 1047245]
processed_samples 9601 unjoint_samples 9600 joint_samples 28 [105157, 1040635]
processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1046281, 816505]
processed_samples 9600 unjoint_samples 9600 joint_samples 27 [1046550, 932202]
processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1035849, 1023239]
processed_samples 9601 unjoint_samples 9600 joint_samples 27 [978587, 1032295]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
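The interleaved "[h264 @ 0x...] mmco: unref short failure" messages come from FFmpeg's libavcodec H.264 decoder: a memory-management control operation referenced a short-term reference frame that was no longer available, which points at imperfect reference handling in the source videos rather than at a training fault. If frame decoding goes through PyAV (an assumption; the actual decode backend is not visible in this log), the warning spam can be reduced with a sketch like the one below; with the ffmpeg CLI the equivalent is -loglevel error.

import av          # PyAV bindings around FFmpeg; assumed decode backend for this sketch
import av.logging

# Only surface libavcodec errors; warnings such as "mmco: unref short failure" are dropped.
av.logging.set_level(av.logging.ERROR)

def decode_first_frames(path, limit=4):
    """Minimal decode loop to show the quieter logging in effect (path is a placeholder)."""
    frames = []
    with av.open(path) as container:
        for frame in container.decode(video=0):
            frames.append(frame.to_ndarray(format="rgb24"))
            if len(frames) >= limit:
                break
    return frames

Silencing the messages only reduces log volume; the decoder still recovers on its own, as it does throughout this run.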
processed_samples 9700 unjoint_samples 9700 joint_samples 29 [146321, 1032168]
processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1046078, 157290]
processed_samples 9701 unjoint_samples 9700 joint_samples 28 [1046573, 376808]
processed_samples 9700 unjoint_samples 9700 joint_samples 29 [1045645, 322417]
processed_samples 9700 unjoint_samples 9700 joint_samples 28 [191240, 1042775]
processed_samples 9700 unjoint_samples 9700 joint_samples 28 [844837, 1047245]
processed_samples 9701 unjoint_samples 9700 joint_samples 28 [371620, 1040635]
processed_samples 9701 unjoint_samples 9700 joint_samples 29 [1046957, 566150]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 9800 unjoint_samples 9800 joint_samples 29 [416927, 1032168]
processed_samples 9800 unjoint_samples 9800 joint_samples 29 [110924, 1047732]
processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1045645, 716169]
processed_samples 9801 unjoint_samples 9800 joint_samples 28 [1046573, 716593]
processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1046078, 528091]
processed_samples 9800 unjoint_samples 9800 joint_samples 28 [428567, 1042775]
processed_samples 9801 unjoint_samples 9800 joint_samples 28 [704049, 1040635]
processed_samples 9801 unjoint_samples 9800 joint_samples 29 [1046957, 861756]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 9901 unjoint_samples 9900 joint_samples 29 [1046728, 8692]
processed_samples 9901 unjoint_samples 9900 joint_samples 29 [67087, 1040635]
processed_samples 9900 unjoint_samples 9900 joint_samples 29 [560020, 1047732]
processed_samples 9900 unjoint_samples 9900 joint_samples 28 [1046078, 823360]
processed_samples 9901 unjoint_samples 9900 joint_samples 30 [1047353, 178821]
processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1045645, 1037558]
processed_samples 9900 unjoint_samples 9900 joint_samples 28 [730793, 1042775]
processed_samples 9900 unjoint_samples 9900 joint_samples 29 [742774, 1032168]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 10001 unjoint_samples 10000 joint_samples 29 [1046728, 373428]
processed_samples 10000 unjoint_samples 10000 joint_samples 30 [384460, 1046455]
processed_samples 10000 unjoint_samples 10000 joint_samples 29 [188804, 997234]
processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1045470, 1047732]
processed_samples 10000 unjoint_samples 10000 joint_samples 28 [993470, 1042775]
processed_samples 10001 unjoint_samples 10000 joint_samples 29 [390021, 1040635]
processed_samples 10001 unjoint_samples 10000 joint_samples 30 [1047353, 555714]
processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1046927, 1047064]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
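Every progress record in this section shows up more than once (several processes print the same counters) and the decoder warnings differ only in the context pointer, so a dump like this collapses well. A hypothetical clean-up sketch, in the spirit of the condensed blocks above, that splits run-together lines back into individual messages, normalizes the 0x... addresses, and folds consecutive repeats into a count:

import re
import sys
from itertools import groupby

# Messages start either with a progress record or with an FFmpeg h264 log prefix.
SPLIT = re.compile(r"(?=processed_samples \d|\[h264 @ )")
ADDR = re.compile(r"0x[0-9a-f]+")

def messages(stream):
    """Break run-together log lines into individual messages."""
    for line in stream:
        for msg in SPLIT.split(line):
            msg = msg.strip()
            if msg:
                yield msg

def collapse(msgs):
    """Fold consecutive messages that differ only by pointer address into one line with a count."""
    for key, run in groupby(msgs, key=lambda m: ADDR.sub("0xADDR", m)):
        count = sum(1 for _ in run)
        yield (f"[x{count}] " if count > 1 else "") + key

if __name__ == "__main__":
    with open(sys.argv[1], errors="replace") as f:  # path to a saved copy of the log (placeholder)
        for line in collapse(messages(f)):
            print(line)

This keeps the exact counter values while turning long runs of identical decoder warnings into a single counted line.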
processed_samples 10100 unjoint_samples 10100 joint_samples 29 [1041765, 242046]
processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1047700, 302991]
processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1046927, 324813]
processed_samples 10100 unjoint_samples 10100 joint_samples 30 [679764, 1046455]
processed_samples 10100 unjoint_samples 10100 joint_samples 29 [620423, 997234]
processed_samples 10101 unjoint_samples 10100 joint_samples 29 [1046728, 639958]
processed_samples 10101 unjoint_samples 10100 joint_samples 30 [1047353, 792655]
processed_samples 10101 unjoint_samples 10100 joint_samples 29 [653665, 1040635]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1047700, 582467]
processed_samples 10201 unjoint_samples 10200 joint_samples 31 [53901, 1045753]
processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1029668, 1046455]
processed_samples 10200 unjoint_samples 10200 joint_samples 29 [872602, 997234]
processed_samples 10200 unjoint_samples 10200 joint_samples 29 [1041765, 614236]
processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1046927, 652828]
processed_samples 10201 unjoint_samples 10200 joint_samples 29 [1046728, 892115]
processed_samples 10201 unjoint_samples 10200 joint_samples 29 [926546, 1040635]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 10300 unjoint_samples 10300 joint_samples 31 [214670, 1046455]
processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1046830, 131165]
processed_samples 10300 unjoint_samples 10300 joint_samples 29 [1041765, 1011282]
processed_samples 10301 unjoint_samples 10300 joint_samples 30 [92700, 1047809]
processed_samples 10301 unjoint_samples 10300 joint_samples 30 [116893, 1036095]
processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1046927, 987470]
processed_samples 10301 unjoint_samples 10300 joint_samples 31 [359475, 1045753]
processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1047700, 944218]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
processed_samples 10400 unjoint_samples 10400 joint_samples 31 [336806, 1046094]
processed_samples 10400 unjoint_samples 10400 joint_samples 30 [1046830, 417756]
processed_samples 10400 unjoint_samples 10400 joint_samples 30 [230079, 1042259]
processed_samples 10401 unjoint_samples 10400 joint_samples 30 [452146, 1036095]
processed_samples 10400 unjoint_samples 10400 joint_samples 31 [536701, 1046455]
processed_samples 10400 unjoint_samples 10400 joint_samples 31 [130554, 1046983]
processed_samples 10401 unjoint_samples 10400 joint_samples 31 [652329, 1045753]
processed_samples 10401 unjoint_samples 10400 joint_samples 30 [382202, 1047809]
[h264 @ 0x...] mmco: unref short failure (repeated; decoder context addresses vary)
0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x561289d4ff80] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 
0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure processed_samples 10500 unjoint_samples 10500 joint_samples 31 [664520, 1046094] processed_samples 10500 unjoint_samples 10500 joint_samples 30 [500741, 1042259] processed_samples 10500 unjoint_samples 10500 joint_samples 31 [453206, 1046983] processed_samples 10501 unjoint_samples 10500 joint_samples 30 [800445, 1036095] processed_samples 10500 unjoint_samples 10500 joint_samples 31 [843113, 1046455] processed_samples 10500 unjoint_samples 10500 joint_samples 30 [1046830, 770767] processed_samples 10501 unjoint_samples 10500 joint_samples 30 [641718, 1047809] processed_samples 10500 unjoint_samples 10500 joint_samples 31 [664520, 1046094] processed_samples 10500 unjoint_samples 10500 joint_samples 30 [500741, 1042259] processed_samples 10500 unjoint_samples 10500 joint_samples 31 [453206, 1046983] processed_samples 10501 unjoint_samples 10500 joint_samples 31 [962684, 1045753] processed_samples 10500 unjoint_samples 10500 joint_samples 30 [1046830, 770767] processed_samples 10501 unjoint_samples 10500 joint_samples 30 [800445, 1036095] processed_samples 10501 unjoint_samples 10500 joint_samples 30 [641718, 1047809] processed_samples 10500 unjoint_samples 10500 joint_samples 31 [843113, 1046455] [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure processed_samples 10501 unjoint_samples 10500 joint_samples 31 [962684, 1045753] [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 
0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure processed_samples 10600 unjoint_samples 10600 joint_samples 31 
[85810, 999864] processed_samples 10600 unjoint_samples 10600 joint_samples 32 [1034302, 210219] processed_samples 10600 unjoint_samples 10600 joint_samples 31 [85810, 999864] processed_samples 10600 unjoint_samples 10600 joint_samples 32 [1034302, 210219] processed_samples 10600 unjoint_samples 10600 joint_samples 31 [981815, 1046094] processed_samples 10601 unjoint_samples 10600 joint_samples 31 [139502, 1039407] processed_samples 10600 unjoint_samples 10600 joint_samples 31 [981815, 1046094] processed_samples 10601 unjoint_samples 10600 joint_samples 31 [139502, 1039407] processed_samples 10601 unjoint_samples 10600 joint_samples 32 [1035445, 250696] processed_samples 10601 unjoint_samples 10600 joint_samples 32 [1035445, 250696] processed_samples 10600 unjoint_samples 10600 joint_samples 31 [724265, 1046983] processed_samples 10600 unjoint_samples 10600 joint_samples 31 [724265, 1046983] processed_samples 10600 unjoint_samples 10600 joint_samples 30 [829573, 1042259] processed_samples 10600 unjoint_samples 10600 joint_samples 30 [829573, 1042259] processed_samples 10601 unjoint_samples 10600 joint_samples 31 [1011866, 57663] processed_samples 10601 unjoint_samples 10600 joint_samples 31 [1011866, 57663] [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22aae1740] 
mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x55e228ce0840] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure processed_samples 10700 unjoint_samples 10700 joint_samples 31 [410844, 999864] processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1047125, 258881] [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1045901, 112895] processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1034302, 507001] [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure processed_samples 10701 unjoint_samples 10700 joint_samples 31 [1011866, 396747] processed_samples 10701 unjoint_samples 10700 joint_samples 32 [1035445, 511244] processed_samples 10701 unjoint_samples 10700 joint_samples 31 [411229, 1039407] [h264 @ 
0x561288d5f900] mmco: unref short failure processed_samples 10700 unjoint_samples 10700 joint_samples 31 [971292, 1046983] [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure processed_samples 10700 unjoint_samples 10700 joint_samples 31 [410844, 999864] processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1047125, 258881] processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1045901, 112895] processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1034302, 507001] [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure processed_samples 10701 unjoint_samples 10700 joint_samples 31 [1011866, 396747] processed_samples 10701 unjoint_samples 10700 joint_samples 32 [1035445, 511244] processed_samples 10701 unjoint_samples 10700 joint_samples 31 [411229, 1039407] [h264 @ 0x55e22b5d3980] mmco: unref short failure processed_samples 10700 unjoint_samples 10700 joint_samples 31 [971292, 1046983] [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: 
unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1047125, 604215] [h264 @ 0x561288fff700] mmco: unref short failure processed_samples 10800 unjoint_samples 10800 joint_samples 31 [1045901, 340482] processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1021268, 497592] processed_samples 10801 unjoint_samples 10800 joint_samples 31 [695932, 1039407] processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1034302, 811537] processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1047125, 604215] processed_samples 10801 unjoint_samples 10800 joint_samples 31 [1011866, 750727] [h264 @ 0x55e2290ef580] mmco: unref short failure processed_samples 10800 unjoint_samples 10800 joint_samples 31 [804631, 999864] processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1021268, 497592] processed_samples 10800 unjoint_samples 10800 joint_samples 31 [1045901, 340482] processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1034302, 811537] processed_samples 10801 unjoint_samples 10800 joint_samples 32 [1035445, 848184] processed_samples 10801 unjoint_samples 10800 joint_samples 31 [695932, 1039407] [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure processed_samples 10800 unjoint_samples 10800 joint_samples 31 [804631, 999864] processed_samples 10801 unjoint_samples 10800 joint_samples 31 [1011866, 750727] [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure processed_samples 10801 unjoint_samples 10800 joint_samples 32 [1035445, 848184] [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref 
short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure processed_samples 10900 unjoint_samples 
10900 joint_samples 33 [1037143, 75862] processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1048250, 85505] processed_samples 10900 unjoint_samples 10900 joint_samples 33 [1037143, 75862] processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1048250, 85505] processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047125, 877275] processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1021268, 821172] processed_samples 10901 unjoint_samples 10900 joint_samples 33 [256400, 1034666] processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047125, 877275] processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1021268, 821172] processed_samples 10901 unjoint_samples 10900 joint_samples 33 [256400, 1034666] processed_samples 10900 unjoint_samples 10900 joint_samples 31 [1045901, 722753] processed_samples 10900 unjoint_samples 10900 joint_samples 31 [1045901, 722753] processed_samples 10901 unjoint_samples 10900 joint_samples 31 [1011866, 1006283] processed_samples 10901 unjoint_samples 10900 joint_samples 31 [1011866, 1006283] processed_samples 10901 unjoint_samples 10900 joint_samples 31 [1041569, 1041661] processed_samples 10901 unjoint_samples 10900 joint_samples 31 [1041569, 1041661] [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short 
failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short 
failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure processed_samples 11000 unjoint_samples 11000 joint_samples 33 [116198, 1046892] [h264 @ 0x55e228b387c0] mmco: unref short failure processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1037143, 501990] processed_samples 11001 unjoint_samples 11000 joint_samples 32 [330665, 1046558] [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1048250, 453659] processed_samples 11001 unjoint_samples 11000 joint_samples 33 [497398, 1034666] [h264 @ 0x561289115f80] mmco: unref short failure processed_samples 11000 unjoint_samples 11000 joint_samples 33 [116198, 1046892] processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1046509, 27980] processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1037143, 501990] processed_samples 11001 unjoint_samples 11000 joint_samples 32 [353726, 1008326] processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1048250, 453659] processed_samples 11000 unjoint_samples 11000 joint_samples 31 [1045901, 1021475] processed_samples 11001 unjoint_samples 11000 joint_samples 32 [330665, 1046558] [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure processed_samples 11001 unjoint_samples 11000 joint_samples 33 [497398, 1034666] processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1046509, 27980] processed_samples 11001 unjoint_samples 11000 joint_samples 32 [353726, 1008326] processed_samples 11000 unjoint_samples 11000 joint_samples 31 [1045901, 1021475] [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure 
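The processed_samples / unjoint_samples / joint_samples counters above are the only progress signal in this part of the log, and each checkpoint advances by 100 samples (10300, 10400, ..., 11000). Below is a minimal sketch of how these lines could be pulled out of the raw log for offline progress tracking; the regular expression, the parse_progress helper, and the train.log filename are hypothetical and assume only the line format visible here, not any tooling shipped with the training code.

import re
from collections import defaultdict

# Hypothetical offline helper: collect "processed_samples ..." counters from a raw
# training log so progress can be inspected without the interleaved decoder warnings.
PROGRESS_RE = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) joint_samples (\d+) \[(\d+), (\d+)\]"
)

def parse_progress(log_text):
    # Map processed_samples -> list of (joint_samples, value_a, value_b),
    # one tuple per matching log line at that checkpoint.
    checkpoints = defaultdict(list)
    for match in PROGRESS_RE.finditer(log_text):
        processed, _unjoint, joint, a, b = (int(g) for g in match.groups())
        checkpoints[processed].append((joint, a, b))
    return checkpoints

if __name__ == "__main__":
    with open("train.log", errors="ignore") as f:   # placeholder path
        progress = parse_progress(f.read())
    for step in sorted(progress):
        print(step, len(progress[step]), "entries")

Since these log lines carry no timestamps, only relative progress can be recovered this way; samples-per-second would have to come from a timestamped source.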
[h264 @ 0x...] mmco: unref short failure    (FFmpeg H.264 decoder warning; continues to repeat throughout this interval, interleaved with the progress lines below)

processed_samples 11100 unjoint_samples 11100 joint_samples 32 [255841, 1039662]
processed_samples 11100 unjoint_samples 11100 joint_samples 33 [382617, 1046892]
processed_samples 11101 unjoint_samples 11100 joint_samples 33 [773577, 1034666]
processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1037143, 753303]
processed_samples 11100 unjoint_samples 11100 joint_samples 32 [1048250, 697253]
processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1046509, 342419]
processed_samples 11101 unjoint_samples 11100 joint_samples 32 [620448, 1046558]
processed_samples 11101 unjoint_samples 11100 joint_samples 32 [744837, 1008326]

processed_samples 11200 unjoint_samples 11200 joint_samples 34 [142434, 1029604]
processed_samples 11200 unjoint_samples 11200 joint_samples 33 [141402, 1045305]
processed_samples 11201 unjoint_samples 11200 joint_samples 34 [69591, 1046390]
processed_samples 11200 unjoint_samples 11200 joint_samples 32 [755871, 1039662]
processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1046509, 640567]
processed_samples 11200 unjoint_samples 11200 joint_samples 33 [744386, 1046892]
processed_samples 11201 unjoint_samples 11200 joint_samples 32 [1027999, 1026973]
processed_samples 11201 unjoint_samples 11200 joint_samples 32 [962520, 1046558]

processed_samples 11300 unjoint_samples 11300 joint_samples 34 [501209, 1029604]
processed_samples 11300 unjoint_samples 11300 joint_samples 34 [1045244, 48372]
processed_samples 11301 unjoint_samples 11300 joint_samples 33 [289724, 1046755]
processed_samples 11301 unjoint_samples 11300 joint_samples 33 [1030135, 387149]
processed_samples 11300 unjoint_samples 11300 joint_samples 33 [524312, 1045305]
processed_samples 11301 unjoint_samples 11300 joint_samples 34 [471358, 1046390]
processed_samples 11300 unjoint_samples 11300 joint_samples 33 [24907, 1041977]
processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1046509, 899453]

processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1045244, 439524]
processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1046509, 317741]
processed_samples 11400 unjoint_samples 11400 joint_samples 33 [384819, 1041977]
processed_samples 11401 unjoint_samples 11400 joint_samples 33 [1030135, 677382]
processed_samples 11400 unjoint_samples 11400 joint_samples 34 [795511, 1029604]
processed_samples 11401 unjoint_samples 11400 joint_samples 33 [597168, 1046755]
processed_samples 11400 unjoint_samples 11400 joint_samples 33 [880070, 1045305]
processed_samples 11401 unjoint_samples 11400 joint_samples 34 [746714, 1046390]

processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1045129, 181305]
processed_samples 11500 unjoint_samples 11500 joint_samples 35 [123311, 1043623]
processed_samples 11500 unjoint_samples 11500 joint_samples 33 [687750, 1041977]
processed_samples 11501 unjoint_samples 11500 joint_samples 33 [1030135, 926059]
processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1046509, 617797]
processed_samples 11501 unjoint_samples 11500 joint_samples 35 [1045793, 21842]
processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1045244, 692086]
processed_samples 11501 unjoint_samples 11500 joint_samples 33 [857319, 1046755]
0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure processed_samples 11600 unjoint_samples 11600 joint_samples 35 [459511, 1043623] processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1045129, 523596] processed_samples 11601 unjoint_samples 11600 joint_samples 34 [244751, 1030961] processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1046509, 871409] processed_samples 11600 unjoint_samples 11600 joint_samples 33 [931627, 1041977] processed_samples 11600 unjoint_samples 11600 joint_samples 35 [1045244, 9579] processed_samples 11600 unjoint_samples 11600 joint_samples 35 [459511, 1043623] processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1045129, 523596] processed_samples 11601 unjoint_samples 11600 joint_samples 34 [244751, 1030961] processed_samples 11600 unjoint_samples 11600 joint_samples 35 [1045244, 9579] processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1046509, 871409] processed_samples 11600 unjoint_samples 11600 joint_samples 33 [931627, 1041977] processed_samples 11601 unjoint_samples 11600 joint_samples 35 [1045793, 275058] processed_samples 11601 unjoint_samples 11600 joint_samples 35 [1045793, 275058] processed_samples 11601 unjoint_samples 11600 joint_samples 34 [136271, 1046755] processed_samples 11601 unjoint_samples 11600 joint_samples 34 [136271, 1046755] [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 
0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure processed_samples 11700 unjoint_samples 11700 joint_samples 35 [760357, 1043623] processed_samples 11700 unjoint_samples 11700 joint_samples 35 [760357, 1043623] processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1045244, 456921] processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1045244, 456921] [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure processed_samples 11700 unjoint_samples 11700 joint_samples 35 [290888, 1044365] [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure processed_samples 11700 unjoint_samples 11700 joint_samples 34 [239977, 1044218] processed_samples 11701 unjoint_samples 11700 joint_samples 34 [600744, 1030961] processed_samples 11700 unjoint_samples 11700 joint_samples 35 [290888, 1044365] processed_samples 11701 unjoint_samples 11700 joint_samples 35 [1045793, 527180] processed_samples 11700 unjoint_samples 11700 joint_samples 34 [239977, 1044218] processed_samples 11701 unjoint_samples 11700 joint_samples 34 [600744, 1030961] processed_samples 11701 unjoint_samples 11700 joint_samples 35 [1045793, 527180] processed_samples 11701 unjoint_samples 11700 joint_samples 34 [520096, 1046755] processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1045129, 863755] processed_samples 11701 unjoint_samples 11700 joint_samples 34 [520096, 
1046755] processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1045129, 863755] [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x56128c7a2700] mmco: unref short failure [h264 @ 0x56128c7a2700] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1045244, 734304] processed_samples 11800 unjoint_samples 11800 joint_samples 35 [239777, 1042446] processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1000932, 1043623] processed_samples 11800 unjoint_samples 11800 joint_samples 34 [586321, 1044218] processed_samples 11800 unjoint_samples 11800 joint_samples 35 [511575, 1044365] processed_samples 11801 unjoint_samples 11800 joint_samples 34 [866215, 1046755] processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1045244, 734304] [h264 @ 0x56128979f6c0] mmco: unref short failure processed_samples 11801 unjoint_samples 11800 joint_samples 34 [877268, 1030961] processed_samples 11800 unjoint_samples 11800 joint_samples 35 [239777, 1042446] processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1000932, 1043623] processed_samples 11800 unjoint_samples 11800 joint_samples 34 [586321, 1044218] processed_samples 11801 unjoint_samples 11800 joint_samples 35 
[1045793, 780996] processed_samples 11800 unjoint_samples 11800 joint_samples 35 [511575, 1044365] processed_samples 11801 unjoint_samples 11800 joint_samples 34 [866215, 1046755] processed_samples 11801 unjoint_samples 11800 joint_samples 34 [877268, 1030961] processed_samples 11801 unjoint_samples 11800 joint_samples 35 [1045793, 780996] [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561288b2c5c0] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 
0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1039239, 317932] processed_samples 11900 unjoint_samples 11900 joint_samples 35 [543370, 1042446] processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1039239, 317932] processed_samples 11900 unjoint_samples 11900 joint_samples 35 [543370, 1042446] processed_samples 11901 unjoint_samples 11900 joint_samples 35 [1017725, 316899] processed_samples 11901 unjoint_samples 11900 joint_samples 35 [1017725, 316899] processed_samples 11900 unjoint_samples 11900 joint_samples 36 [28459, 1034983] processed_samples 11900 unjoint_samples 11900 joint_samples 35 [841324, 1044365] processed_samples 11900 unjoint_samples 11900 joint_samples 35 [841324, 1044365] processed_samples 11900 unjoint_samples 11900 joint_samples 36 [28459, 1034983] processed_samples 11900 unjoint_samples 11900 joint_samples 34 [907045, 1044218] processed_samples 11900 unjoint_samples 11900 joint_samples 34 [907045, 1044218] processed_samples 11901 unjoint_samples 11900 joint_samples 36 [25761, 1047772] processed_samples 11901 unjoint_samples 11900 joint_samples 36 [25761, 1047772] processed_samples 11901 unjoint_samples 11900 joint_samples 35 [1044644, 202109] processed_samples 11901 unjoint_samples 11900 joint_samples 35 [1044644, 202109] [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 
0x561289d4ff80] mmco: unref short failure [h264 @ 0x561289d4ff80] mmco: unref short failure [h264 @ 0x561289d4ff80] mmco: unref short failure [h264 @ 0x561289d4ff80] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e228ce0840] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228ce0840] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure processed_samples 12000 unjoint_samples 12000 joint_samples 36 [350031, 1034983] processed_samples 12000 
unjoint_samples 12000 joint_samples 36 [350031, 1034983] processed_samples 12001 unjoint_samples 12000 joint_samples 35 [1017725, 628565] processed_samples 12000 unjoint_samples 12000 joint_samples 35 [162752, 1046884] processed_samples 12001 unjoint_samples 12000 joint_samples 35 [1017725, 628565] processed_samples 12000 unjoint_samples 12000 joint_samples 35 [162752, 1046884] processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1046404, 196591] processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1046404, 196591] processed_samples 12001 unjoint_samples 12000 joint_samples 36 [268056, 1047772] processed_samples 12001 unjoint_samples 12000 joint_samples 36 [268056, 1047772] processed_samples 12000 unjoint_samples 12000 joint_samples 35 [959924, 1042446] processed_samples 12000 unjoint_samples 12000 joint_samples 35 [959924, 1042446] processed_samples 12001 unjoint_samples 12000 joint_samples 35 [1044644, 509748] processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1039239, 602849] processed_samples 12001 unjoint_samples 12000 joint_samples 35 [1044644, 509748] processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1039239, 602849] [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 
0x55e229b23540] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure processed_samples 12100 unjoint_samples 12100 joint_samples 35 [510485, 1046884] processed_samples 12100 unjoint_samples 12100 joint_samples 36 [648491, 1034983] processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1046404, 523821] processed_samples 12100 unjoint_samples 12100 joint_samples 35 [510485, 1046884] processed_samples 12100 unjoint_samples 12100 joint_samples 36 [648491, 1034983] processed_samples 12100 unjoint_samples 12100 joint_samples 36 [273515, 1046406] [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure processed_samples 12100 unjoint_samples 12100 joint_samples 36 [273515, 1046406] processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1039239, 903419] processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1046404, 523821] processed_samples 12101 unjoint_samples 12100 joint_samples 36 [604513, 1047772] [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure processed_samples 12101 unjoint_samples 12100 joint_samples 36 [604513, 1047772] processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1039239, 903419] processed_samples 12101 unjoint_samples 12100 joint_samples 35 [1023331, 1024142] processed_samples 12101 unjoint_samples 12100 joint_samples 35 [1044644, 784537] processed_samples 12101 unjoint_samples 12100 joint_samples 35 [1044644, 784537] processed_samples 12101 unjoint_samples 12100 joint_samples 35 [1023331, 1024142] [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 
0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure processed_samples 12200 unjoint_samples 12200 joint_samples 35 [794799, 1046884] processed_samples 12200 unjoint_samples 12200 joint_samples 37 [223899, 1044556] processed_samples 12201 unjoint_samples 12200 joint_samples 36 [1032190, 279992] processed_samples 12200 unjoint_samples 12200 joint_samples 36 [578750, 1046406] processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1046404, 952211] processed_samples 12200 unjoint_samples 12200 joint_samples 37 [1032797, 25887] processed_samples 12200 unjoint_samples 12200 joint_samples 35 [794799, 1046884] processed_samples 12200 unjoint_samples 12200 joint_samples 37 [223899, 1044556] processed_samples 12200 unjoint_samples 12200 joint_samples 36 [578750, 1046406] processed_samples 12201 unjoint_samples 12200 joint_samples 36 [1032190, 279992] processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1046404, 952211] processed_samples 12200 unjoint_samples 12200 joint_samples 37 [1032797, 25887] processed_samples 12201 unjoint_samples 12200 joint_samples 36 [1044644, 140588] [h264 @ 
0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure processed_samples 12201 unjoint_samples 12200 joint_samples 36 [1044644, 140588] [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure processed_samples 12201 unjoint_samples 12200 joint_samples 36 [806085, 1047772] processed_samples 12201 unjoint_samples 12200 joint_samples 36 [806085, 1047772] [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 
0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1032797, 352127] [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1046217, 166632] processed_samples 12301 unjoint_samples 12300 joint_samples 37 [216915, 1047816] [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure processed_samples 12300 unjoint_samples 12300 joint_samples 37 [255797, 1046324] processed_samples 12301 unjoint_samples 12300 joint_samples 36 [1032190, 564128] processed_samples 12300 unjoint_samples 12300 joint_samples 36 [787986, 1046406] processed_samples 12300 unjoint_samples 12300 joint_samples 37 [560803, 1044556] processed_samples 12301 unjoint_samples 12300 joint_samples 36 [1044644, 468347] processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1032797, 352127] [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1046217, 166632] processed_samples 12301 unjoint_samples 12300 joint_samples 37 [216915, 1047816] processed_samples 12300 unjoint_samples 12300 joint_samples 37 [255797, 1046324] processed_samples 12300 unjoint_samples 12300 joint_samples 37 [560803, 1044556] processed_samples 12301 unjoint_samples 12300 joint_samples 36 [1032190, 564128] processed_samples 12300 unjoint_samples 12300 joint_samples 36 [787986, 1046406] [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure processed_samples 12301 unjoint_samples 12300 joint_samples 36 [1044644, 468347] [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x561289f39080] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 
0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x55e231935b00] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x561288bc0640] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289953280] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1037592, 140708] processed_samples 12400 unjoint_samples 12400 joint_samples 36 [1046217, 563634] processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1032797, 580136] processed_samples 12400 unjoint_samples 12400 joint_samples 37 [837066, 1044556] processed_samples 12400 unjoint_samples 12400 joint_samples 37 [465265, 1046324] processed_samples 12401 unjoint_samples 12400 joint_samples 36 [1044644, 866155] processed_samples 12401 unjoint_samples 12400 joint_samples 37 [514307, 1047816] processed_samples 12401 unjoint_samples 12400 joint_samples 36 [1032190, 856740] [h264 @ 0x55e22c2ae040] mmco: unref short failure processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1037592, 140708] processed_samples 12400 unjoint_samples 12400 joint_samples 36 [1046217, 563634] processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1032797, 580136] [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 
0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure processed_samples 12400 unjoint_samples 12400 joint_samples 37 [837066, 1044556] processed_samples 12400 unjoint_samples 12400 joint_samples 37 [465265, 1046324] processed_samples 12401 unjoint_samples 12400 joint_samples 37 [514307, 1047816] processed_samples 12401 unjoint_samples 12400 joint_samples 36 [1032190, 856740] processed_samples 12401 unjoint_samples 12400 joint_samples 36 [1044644, 866155] [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e22a7cc0c0] stream 1, offset 0x1400a4d: partial file [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [mov,mp4,m4a,3gp,3g2,mj2 @ 0x56128943a500] stream 1, offset 0x1400a4d: partial file [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref 
short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure processed_samples 12501 unjoint_samples 12500 joint_samples 37 [1037707, 46550] processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1037592, 424789] processed_samples 12501 unjoint_samples 12500 joint_samples 37 [140136, 1047258] processed_samples 12500 unjoint_samples 12500 joint_samples 36 [1046217, 848315] processed_samples 12501 unjoint_samples 12500 joint_samples 38 [1032860, 150701] processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1037592, 424789] processed_samples 12501 unjoint_samples 12500 joint_samples 37 [1037707, 46550] processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1032797, 852864] processed_samples 12500 unjoint_samples 12500 joint_samples 36 [1046217, 848315] processed_samples 12501 unjoint_samples 12500 joint_samples 37 [140136, 1047258] processed_samples 12500 unjoint_samples 12500 joint_samples 37 [762448, 1046324] processed_samples 12501 unjoint_samples 12500 joint_samples 38 [1032860, 150701] [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1032797, 852864] [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure processed_samples 12501 unjoint_samples 12500 joint_samples 37 [864669, 1047816] [h264 @ 0x55e229eee4c0] mmco: unref short failure processed_samples 12500 unjoint_samples 12500 
joint_samples 37 [762448, 1046324] processed_samples 12501 unjoint_samples 12500 joint_samples 37 [864669, 1047816] [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref 
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1046217, 88623]
processed_samples 12600 unjoint_samples 12600 joint_samples 38 [109278, 1048119]
processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1037592, 768275]
processed_samples 12600 unjoint_samples 12600 joint_samples 38 [1026711, 36383]
processed_samples 12601 unjoint_samples 12600 joint_samples 38 [991838, 219001]
processed_samples 12601 unjoint_samples 12600 joint_samples 38 [1032860, 538020]
processed_samples 12601 unjoint_samples 12600 joint_samples 37 [1037707, 360465]
processed_samples 12601 unjoint_samples 12600 joint_samples 37 [475222, 1047258]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 12700 unjoint_samples 12700 joint_samples 38 [30581, 1035186]
processed_samples 12701 unjoint_samples 12700 joint_samples 38 [991838, 605745]
processed_samples 12700 unjoint_samples 12700 joint_samples 38 [518432, 1048119]
processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1026711, 281324]
processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1046217, 405501]
processed_samples 12701 unjoint_samples 12700 joint_samples 38 [1032860, 820671]
processed_samples 12701 unjoint_samples 12700 joint_samples 37 [1037707, 684648]
processed_samples 12701 unjoint_samples 12700 joint_samples 37 [792974, 1047258]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 12800 unjoint_samples 12800 joint_samples 38 [253305, 1035186]
processed_samples 12800 unjoint_samples 12800 joint_samples 38 [842911, 1048119]
processed_samples 12801 unjoint_samples 12800 joint_samples 39 [148326, 1025113]
processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1026711, 574549]
processed_samples 12800 unjoint_samples 12800 joint_samples 37 [1046217, 703919]
processed_samples 12801 unjoint_samples 12800 joint_samples 38 [1045527, 30375]
processed_samples 12801 unjoint_samples 12800 joint_samples 38 [991838, 867194]
processed_samples 12801 unjoint_samples 12800 joint_samples 37 [1037707, 925625]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1032664, 135079]
processed_samples 12900 unjoint_samples 12900 joint_samples 38 [697548, 1035186]
processed_samples 12901 unjoint_samples 12900 joint_samples 39 [1032709, 94551]
processed_samples 12901 unjoint_samples 12900 joint_samples 38 [1046880, 200975]
processed_samples 12901 unjoint_samples 12900 joint_samples 38 [1045527, 339006]
processed_samples 12901 unjoint_samples 12900 joint_samples 39 [420309, 1025113]
processed_samples 12900 unjoint_samples 12900 joint_samples 37 [1046217, 990549]
processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1026711, 843761]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 13000 unjoint_samples 13000 joint_samples 38 [309605, 1038439]
processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1032664, 453832]
processed_samples 13001 unjoint_samples 13000 joint_samples 39 [1032709, 444626]
processed_samples 13001 unjoint_samples 13000 joint_samples 38 [1046880, 468748]
processed_samples 13000 unjoint_samples 13000 joint_samples 39 [988754, 77968]
processed_samples 13001 unjoint_samples 13000 joint_samples 39 [711282, 1025113]
processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1036178, 1038302]
processed_samples 13001 unjoint_samples 13000 joint_samples 38 [1045527, 590088]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 13100 unjoint_samples 13100 joint_samples 39 [988754, 374937]
processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1032664, 726861]
processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046201, 459760]
processed_samples 13101 unjoint_samples 13100 joint_samples 38 [1046880, 815303]
processed_samples 13100 unjoint_samples 13100 joint_samples 38 [599657, 1038439]
processed_samples 13101 unjoint_samples 13100 joint_samples 39 [1032709, 876620]
processed_samples 13101 unjoint_samples 13100 joint_samples 38 [1045527, 998839]
processed_samples 13101 unjoint_samples 13100 joint_samples 39 [988895, 1025113]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 13201 unjoint_samples 13200 joint_samples 39 [1047108, 53660]
processed_samples 13200 unjoint_samples 13200 joint_samples 39 [988754, 647986]
processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046201, 730453]
processed_samples 13201 unjoint_samples 13200 joint_samples 40 [217479, 1030889]
processed_samples 13201 unjoint_samples 13200 joint_samples 39 [1047624, 285306]
processed_samples 13200 unjoint_samples 13200 joint_samples 38 [973033, 1038439]
processed_samples 13201 unjoint_samples 13200 joint_samples 40 [1039471, 98638]
processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1032664, 1014586]
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x56128a5934c0] stream 0, offset 0x101588b: partial file
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e22a90fec0] stream 0, offset 0x101588b: partial file
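The "partial file" messages above are the mov/mp4 demuxer reporting a truncated video; unlike the mmco warnings, such a clip can fail to decode entirely. A hedged sketch of how a dataset wrapper could skip undecodable clips instead of aborting the step; the class, the `decode_clip` callable, and the retry policy are hypothetical and not taken from this codebase:

```python
# Sketch of a defensive __getitem__ for a video dataset: if a clip fails to
# demux/decode (e.g. a truncated "partial file"), resample another index
# rather than crashing the dataloader worker.
import random
from torch.utils.data import Dataset

class SafeVideoDataset(Dataset):
    def __init__(self, samples, decode_clip, max_retries=5):
        self.samples = samples          # list of (video_path, annotation)
        self.decode_clip = decode_clip  # callable: path -> decoded frames
        self.max_retries = max_retries

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        for _ in range(self.max_retries):
            path, annotation = self.samples[index]
            try:
                return self.decode_clip(path), annotation
            except Exception:
                # Corrupt or partial file: fall back to a random other sample.
                index = random.randrange(len(self.samples))
        raise RuntimeError("too many consecutive undecodable video samples")
```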
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 13300 unjoint_samples 13300 joint_samples 39 [283664, 1046361]
processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1034906, 399760]
processed_samples 13301 unjoint_samples 13300 joint_samples 40 [1039471, 399463]
processed_samples 13301 unjoint_samples 13300 joint_samples 39 [1047624, 549050]
processed_samples 13301 unjoint_samples 13300 joint_samples 40 [499264, 1030889]
processed_samples 13301 unjoint_samples 13300 joint_samples 39 [1047108, 410085]
processed_samples 13301 unjoint_samples 13300 joint_samples 39 [988754, 892894]
processed_samples 13300 unjoint_samples 13300 joint_samples 39 [1046201, 970111]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
[h264 @ 0x561288b2c5c0] Missing reference picture, default is 65530
[h264 @ 0x55e229572500] Missing reference picture, default is 65530
processed_samples 13400 unjoint_samples 13400 joint_samples 39 [545883, 1046361]
processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1034906, 740212]
processed_samples 13400 unjoint_samples 13400 joint_samples 40 [196989, 1034779]
processed_samples 13401 unjoint_samples 13400 joint_samples 40 [26826, 1044780]
processed_samples 13401 unjoint_samples 13400 joint_samples 40 [1039471, 770840]
processed_samples 13401 unjoint_samples 13400 joint_samples 39 [1047108, 798865]
processed_samples 13401 unjoint_samples 13400 joint_samples 40 [798735, 1030889]
processed_samples 13401 unjoint_samples 13400 joint_samples 39 [1047624, 839016]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 13500 unjoint_samples 13500 joint_samples 41 [77605, 1032779]
processed_samples 13501 unjoint_samples 13500 joint_samples 40 [142453, 1045702]
processed_samples 13500 unjoint_samples 13500 joint_samples 40 [497450, 1034779]
processed_samples 13501 unjoint_samples 13500 joint_samples 41 [43783, 1039957]
processed_samples 13501 unjoint_samples 13500 joint_samples 40 [30654, 1047441]
processed_samples 13500 unjoint_samples 13500 joint_samples 39 [847409, 1046361]
processed_samples 13501 unjoint_samples 13500 joint_samples 40 [411593, 1044780]
processed_samples 13501 unjoint_samples 13500 joint_samples 40 [1039471, 1037172]
[h264] mmco: unref short failure  (decoder warning repeated many times across multiple h264 contexts)
processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1015423, 142110]
processed_samples 13600 unjoint_samples 13600 joint_samples 41 [294176, 1032779]
processed_samples 13600 unjoint_samples 13600 joint_samples 40 [775785, 1034779]
processed_samples 13601 unjoint_samples 13600 joint_samples 41 [395129, 1039957]
processed_samples 13601 unjoint_samples 13600 joint_samples 40 [450686, 1045702]
processed_samples 13601 unjoint_samples 13600 joint_samples 40 [449041, 1047441]
processed_samples 13601 unjoint_samples 13600 joint_samples 40 [649554, 1044780]
processed_samples 13601 unjoint_samples 13600 joint_samples 41 [456609, 1039506]
[h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e228ce0840] mmco: unref short failure [h264 @ 0x55e228ce0840] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure processed_samples 13700 unjoint_samples 13700 joint_samples 41 [136016, 1038564] processed_samples 13700 unjoint_samples 13700 joint_samples 41 [136016, 1038564] processed_samples 13701 unjoint_samples 13700 joint_samples 40 [800740, 1045702] processed_samples 13701 unjoint_samples 13700 joint_samples 40 [800740, 1045702] processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1015423, 518234] processed_samples 13700 unjoint_samples 13700 joint_samples 41 [527525, 1032779] processed_samples 13700 unjoint_samples 13700 joint_samples 41 [527525, 1032779] processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1015423, 518234] processed_samples 13701 unjoint_samples 13700 joint_samples 41 [684681, 1039957] processed_samples 13701 unjoint_samples 13700 joint_samples 41 [684681, 1039957] processed_samples 13701 unjoint_samples 13700 joint_samples 40 [748757, 1047441] processed_samples 13701 unjoint_samples 13700 joint_samples 40 [748757, 1047441] processed_samples 13701 unjoint_samples 13700 joint_samples 41 [692174, 1039506] processed_samples 13701 unjoint_samples 13700 joint_samples 41 [692174, 1039506] [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure processed_samples 13701 unjoint_samples 13700 joint_samples 40 [1045971, 1044780] processed_samples 13701 unjoint_samples 13700 joint_samples 40 [1045971, 1044780] [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 
@ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x561289967dc0] mmco: unref short failure [h264 @ 0x561289967dc0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure processed_samples 13800 unjoint_samples 13800 joint_samples 41 [838010, 1032779] processed_samples 13801 unjoint_samples 13800 joint_samples 41 [1045971, 323078] processed_samples 13801 unjoint_samples 13800 joint_samples 42 [67960, 1047648] [h264 @ 
0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure processed_samples 13801 unjoint_samples 13800 joint_samples 41 [216459, 1047979] processed_samples 13800 unjoint_samples 13800 joint_samples 41 [423678, 1038564] processed_samples 13801 unjoint_samples 13800 joint_samples 42 [1045205, 106467] processed_samples 13800 unjoint_samples 13800 joint_samples 40 [1015423, 759271] processed_samples 13800 unjoint_samples 13800 joint_samples 40 [1015423, 759271] processed_samples 13801 unjoint_samples 13800 joint_samples 40 [1032058, 1047441] processed_samples 13801 unjoint_samples 13800 joint_samples 42 [67960, 1047648] processed_samples 13801 unjoint_samples 13800 joint_samples 41 [1045971, 323078] [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure processed_samples 13801 unjoint_samples 13800 joint_samples 41 [216459, 1047979] processed_samples 13800 unjoint_samples 13800 joint_samples 41 [423678, 1038564] processed_samples 13801 unjoint_samples 13800 joint_samples 42 [1045205, 106467] processed_samples 13800 unjoint_samples 13800 joint_samples 41 [838010, 1032779] processed_samples 13801 unjoint_samples 13800 joint_samples 40 [1032058, 1047441] [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 
0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1030233, 121571] processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1030233, 121571] processed_samples 13901 unjoint_samples 13900 joint_samples 42 [1045205, 366358] processed_samples 13901 unjoint_samples 13900 joint_samples 42 [1045205, 366358] processed_samples 13901 unjoint_samples 13900 joint_samples 41 [527816, 1047979] processed_samples 13901 unjoint_samples 13900 joint_samples 41 [527816, 1047979] processed_samples 13900 unjoint_samples 13900 joint_samples 42 [1046943, 105351] processed_samples 13900 unjoint_samples 13900 joint_samples 42 [1046943, 105351] processed_samples 13900 unjoint_samples 13900 joint_samples 41 [739552, 1038564] processed_samples 13900 unjoint_samples 13900 joint_samples 41 [739552, 1038564] processed_samples 13901 unjoint_samples 13900 joint_samples 41 [1045971, 692067] processed_samples 13901 unjoint_samples 13900 joint_samples 41 [1045971, 692067] processed_samples 13901 unjoint_samples 13900 joint_samples 42 [499296, 1047648] processed_samples 13901 unjoint_samples 13900 joint_samples 42 [499296, 1047648] processed_samples 13901 unjoint_samples 13900 joint_samples 41 [1043443, 472857] processed_samples 13901 unjoint_samples 13900 joint_samples 41 [1043443, 472857] [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 
0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56126372f140] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e228ce0840] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e228ce0840] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure processed_samples 14000 unjoint_samples 14000 joint_samples 41 [1030233, 452591] processed_samples 14000 unjoint_samples 14000 joint_samples 42 [1046943, 392167] processed_samples 14001 unjoint_samples 14000 joint_samples 42 [1045205, 729222] processed_samples 14001 unjoint_samples 14000 joint_samples 42 [921174, 1047648] [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure processed_samples 14001 
unjoint_samples 14000 joint_samples 41 [839275, 1047979] processed_samples 14000 unjoint_samples 14000 joint_samples 41 [1041175, 1044513] processed_samples 14001 unjoint_samples 14000 joint_samples 41 [1043443, 813669] [h264 @ 0x56128a0be8c0] mmco: unref short failure processed_samples 14000 unjoint_samples 14000 joint_samples 42 [1046943, 392167] processed_samples 14000 unjoint_samples 14000 joint_samples 41 [1030233, 452591] processed_samples 14001 unjoint_samples 14000 joint_samples 41 [1045971, 1026446] [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure processed_samples 14001 unjoint_samples 14000 joint_samples 42 [1045205, 729222] [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure processed_samples 14001 unjoint_samples 14000 joint_samples 42 [921174, 1047648] processed_samples 14000 unjoint_samples 14000 joint_samples 41 [1041175, 1044513] processed_samples 14001 unjoint_samples 14000 joint_samples 41 [839275, 1047979] [h264 @ 0x55e22cc27500] mmco: unref short failure processed_samples 14001 unjoint_samples 14000 joint_samples 41 [1043443, 813669] processed_samples 14001 unjoint_samples 14000 joint_samples 41 [1045971, 1026446] [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure 
[h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure processed_samples 14101 unjoint_samples 14100 joint_samples 42 [1046812, 101869] processed_samples 14100 unjoint_samples 14100 joint_samples 42 [268544, 1047755] processed_samples 14101 unjoint_samples 14100 joint_samples 43 [1020934, 250080] processed_samples 14100 unjoint_samples 14100 joint_samples 41 [1030233, 703767] processed_samples 14101 unjoint_samples 14100 joint_samples 43 [160479, 1018629] processed_samples 14101 unjoint_samples 14100 joint_samples 42 [393994, 1046210] processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1046943, 748560] processed_samples 14101 unjoint_samples 14100 joint_samples 42 [102391, 1025301] [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure processed_samples 14101 unjoint_samples 14100 joint_samples 42 [1046812, 101869] processed_samples 14100 unjoint_samples 14100 joint_samples 42 [268544, 1047755] processed_samples 14100 unjoint_samples 14100 joint_samples 41 [1030233, 703767] processed_samples 14101 unjoint_samples 14100 joint_samples 43 [1020934, 250080] processed_samples 14101 unjoint_samples 14100 joint_samples 43 [160479, 1018629] processed_samples 14101 unjoint_samples 14100 
joint_samples 42 [393994, 1046210] processed_samples 14101 unjoint_samples 14100 joint_samples 42 [102391, 1025301] processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1046943, 748560] [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561287e360c0] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 
0x55e22aa8b540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure processed_samples 14200 unjoint_samples 14200 joint_samples 43 [146139, 1046127] processed_samples 14200 unjoint_samples 14200 joint_samples 43 [146139, 1046127] processed_samples 14201 unjoint_samples 14200 joint_samples 42 [806338, 1046210] processed_samples 14201 unjoint_samples 14200 joint_samples 42 [806338, 1046210] processed_samples 14201 unjoint_samples 14200 joint_samples 43 [416944, 1018629] processed_samples 14201 unjoint_samples 14200 joint_samples 43 [1020934, 632152] processed_samples 14201 unjoint_samples 14200 joint_samples 42 [1046812, 372542] processed_samples 14201 unjoint_samples 14200 joint_samples 43 [416944, 1018629] processed_samples 14201 unjoint_samples 14200 joint_samples 43 [1020934, 632152] processed_samples 14200 unjoint_samples 14200 joint_samples 42 [487786, 1047755] processed_samples 14201 unjoint_samples 14200 joint_samples 42 [1046812, 372542] processed_samples 14200 unjoint_samples 14200 joint_samples 42 [487786, 1047755] processed_samples 14201 unjoint_samples 14200 joint_samples 42 [423588, 1025301] processed_samples 14201 unjoint_samples 14200 joint_samples 42 [423588, 1025301] processed_samples 14200 unjoint_samples 14200 joint_samples 41 [1036678, 1033332] processed_samples 14200 unjoint_samples 14200 joint_samples 41 [1036678, 1033332] [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e228fe45c0] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 
0x55e229e89400] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure processed_samples 14300 unjoint_samples 14300 joint_samples 42 [309218, 1041031] processed_samples 14300 unjoint_samples 14300 joint_samples 43 [423706, 1046127] processed_samples 14300 unjoint_samples 14300 joint_samples 42 [760128, 1047755] processed_samples 14301 unjoint_samples 14300 joint_samples 43 [1045881, 57187] processed_samples 14300 unjoint_samples 14300 joint_samples 42 [309218, 1041031] processed_samples 14301 unjoint_samples 14300 joint_samples 43 [745520, 1018629] processed_samples 14300 unjoint_samples 14300 joint_samples 43 [423706, 1046127] processed_samples 14301 unjoint_samples 14300 joint_samples 42 [1046812, 630451] processed_samples 14301 unjoint_samples 14300 joint_samples 43 [1020934, 962107] processed_samples 14300 unjoint_samples 14300 joint_samples 42 [760128, 1047755] 
processed_samples 14301 unjoint_samples 14300 joint_samples 43 [1045881, 57187] processed_samples 14301 unjoint_samples 14300 joint_samples 43 [745520, 1018629] processed_samples 14301 unjoint_samples 14300 joint_samples 42 [754889, 1025301] processed_samples 14301 unjoint_samples 14300 joint_samples 43 [1020934, 962107] processed_samples 14301 unjoint_samples 14300 joint_samples 42 [1046812, 630451] processed_samples 14301 unjoint_samples 14300 joint_samples 42 [754889, 1025301] [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 
0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure processed_samples 14400 unjoint_samples 14400 joint_samples 43 [852559, 1046127] processed_samples 14401 unjoint_samples 14400 joint_samples 44 [309455, 1028924] processed_samples 14401 unjoint_samples 14400 joint_samples 44 [309455, 1028924] processed_samples 14400 unjoint_samples 14400 joint_samples 43 [852559, 1046127] processed_samples 14400 unjoint_samples 14400 joint_samples 42 [707487, 1041031] processed_samples 14401 unjoint_samples 14400 joint_samples 43 [50757, 1032760] processed_samples 14401 unjoint_samples 14400 joint_samples 43 [1045881, 402647] processed_samples 14400 unjoint_samples 14400 joint_samples 42 [707487, 1041031] processed_samples 14400 unjoint_samples 14400 joint_samples 42 [1008527, 1047755] processed_samples 14401 unjoint_samples 14400 joint_samples 43 [1045881, 402647] processed_samples 14401 unjoint_samples 14400 joint_samples 42 [1046812, 944529] processed_samples 14400 unjoint_samples 14400 joint_samples 42 [1008527, 1047755] processed_samples 14401 unjoint_samples 14400 joint_samples 42 [1046812, 944529] processed_samples 14401 unjoint_samples 14400 joint_samples 43 [50757, 1032760] [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure processed_samples 14401 unjoint_samples 14400 joint_samples 43 [1010658, 1018629] processed_samples 14401 unjoint_samples 14400 joint_samples 43 [1010658, 1018629] [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x56129050d380] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 
0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure processed_samples 14500 unjoint_samples 14500 joint_samples 44 [1044958, 272467] [h264 @ 0x55e22d087040] mmco: unref short failure processed_samples 14501 unjoint_samples 14500 joint_samples 43 [1047617, 242765] processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1034260, 316171] processed_samples 14501 unjoint_samples 14500 joint_samples 44 [738305, 1028924] processed_samples 14501 unjoint_samples 14500 joint_samples 43 [426763, 1032760] processed_samples 14501 unjoint_samples 14500 joint_samples 43 [1045881, 772538] [h264 @ 0x56128900a440] mmco: unref short failure processed_samples 14500 unjoint_samples 14500 joint_samples 42 [987059, 1041031] processed_samples 14501 unjoint_samples 14500 joint_samples 44 [299201, 1043905] [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure processed_samples 14500 unjoint_samples 14500 joint_samples 44 [1044958, 272467] [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure processed_samples 14501 unjoint_samples 14500 joint_samples 43 [1047617, 242765] processed_samples 14501 unjoint_samples 14500 joint_samples 44 [738305, 1028924] processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1034260, 316171] processed_samples 14501 unjoint_samples 14500 joint_samples 43 [1045881, 772538] processed_samples 14501 unjoint_samples 14500 joint_samples 43 [426763, 1032760] processed_samples 14501 unjoint_samples 14500 joint_samples 44 [299201, 1043905] processed_samples 14500 unjoint_samples 14500 joint_samples 42 [987059, 1041031] [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 
0x55e231b440c0] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 
0x56128a101380] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure processed_samples 14600 unjoint_samples 14600 joint_samples 44 [1044958, 606770] processed_samples 14601 unjoint_samples 14600 joint_samples 45 [23613, 1047119] processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1046675, 159901] processed_samples 14600 unjoint_samples 14600 joint_samples 44 [1044958, 606770] processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1034260, 658519] processed_samples 14601 unjoint_samples 14600 joint_samples 45 [23613, 1047119] processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1046675, 159901] processed_samples 14601 unjoint_samples 14600 joint_samples 43 [1047617, 654601] processed_samples 14601 unjoint_samples 14600 joint_samples 43 [1047617, 654601] processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1034260, 658519] processed_samples 14601 unjoint_samples 14600 joint_samples 43 [667773, 1032760] processed_samples 14601 unjoint_samples 14600 joint_samples 44 [104456, 1046664] processed_samples 14601 unjoint_samples 14600 joint_samples 43 [667773, 1032760] processed_samples 14601 unjoint_samples 14600 joint_samples 44 [104456, 1046664] processed_samples 14601 unjoint_samples 14600 joint_samples 44 [621949, 1043905] processed_samples 14601 unjoint_samples 14600 joint_samples 44 [621949, 1043905] [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 
0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x55e22df7b400] mmco: unref short failure [h264 @ 0x55e22b79bfc0] mmco: unref short failure [h264 @ 0x55e22b79bfc0] mmco: unref short failure [h264 @ 0x55e22b79bfc0] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22b79bfc0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure 
[h264 @ 0x56128cde2540] mmco: unref short failure    (this decoder warning recurs, from many decoder instances, between and within the progress records below)

processed_samples 14700 unjoint_samples 14700 joint_samples 43 [1046675, 618603]
processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1044958, 917677]
processed_samples 14701 unjoint_samples 14700 joint_samples 45 [267189, 1047119]
processed_samples 14701 unjoint_samples 14700 joint_samples 44 [346142, 1046664]
processed_samples 14701 unjoint_samples 14700 joint_samples 44 [887737, 1043905]
processed_samples 14701 unjoint_samples 14700 joint_samples 43 [954945, 1032760]
processed_samples 14701 unjoint_samples 14700 joint_samples 43 [1047617, 882547]
processed_samples 14700 unjoint_samples 14700 joint_samples 43 [1034260, 971117]

processed_samples 14800 unjoint_samples 14800 joint_samples 45 [118205, 1047704]
processed_samples 14801 unjoint_samples 14800 joint_samples 44 [1047617, 75979]
processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1036519, 336341]
processed_samples 14800 unjoint_samples 14800 joint_samples 43 [1046675, 846539]
processed_samples 14801 unjoint_samples 14800 joint_samples 45 [1046784, 134778]
processed_samples 14801 unjoint_samples 14800 joint_samples 44 [340853, 1034845]
processed_samples 14801 unjoint_samples 14800 joint_samples 45 [610062, 1047119]
processed_samples 14801 unjoint_samples 14800 joint_samples 44 [637994, 1046664]

processed_samples 14900 unjoint_samples 14900 joint_samples 45 [506338, 1047704]
processed_samples 14901 unjoint_samples 14900 joint_samples 44 [1047617, 286149]
processed_samples 14900 unjoint_samples 14900 joint_samples 44 [188884, 1045282]
processed_samples 14901 unjoint_samples 14900 joint_samples 45 [1046784, 349457]
processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1036519, 573813]
processed_samples 14901 unjoint_samples 14900 joint_samples 45 [938772, 1047119]
processed_samples 14901 unjoint_samples 14900 joint_samples 44 [574378, 1034845]
processed_samples 14901 unjoint_samples 14900 joint_samples 44 [1008134, 1046664]

processed_samples 15000 unjoint_samples 15000 joint_samples 44 [643707, 1045282]
processed_samples 15001 unjoint_samples 15000 joint_samples 45 [263867, 1046664]
processed_samples 15001 unjoint_samples 15000 joint_samples 44 [920906, 1034845]
processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1036519, 860029]
processed_samples 15001 unjoint_samples 15000 joint_samples 46 [1021611, 220072]
processed_samples 15001 unjoint_samples 15000 joint_samples 45 [1046784, 589095]
processed_samples 15000 unjoint_samples 15000 joint_samples 45 [813021, 1047704]
processed_samples 15001 unjoint_samples 15000 joint_samples 44 [1047617, 668892]

processed_samples 15100 unjoint_samples 15100 joint_samples 46 [1005660, 177884]
processed_samples 15101 unjoint_samples 15100 joint_samples 45 [24616, 1046336]
processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046664, 132697]
processed_samples 15101 unjoint_samples 15100 joint_samples 46 [1021611, 471952]
processed_samples 15101 unjoint_samples 15100 joint_samples 45 [425772, 1047449]
processed_samples 15100 unjoint_samples 15100 joint_samples 44 [939686, 1045282]
processed_samples 15101 unjoint_samples 15100 joint_samples 45 [605248, 1046664]
processed_samples 15101 unjoint_samples 15100 joint_samples 45 [1046784, 861792]

processed_samples 15200 unjoint_samples 15200 joint_samples 46 [1005660, 624586]
processed_samples 15200 unjoint_samples 15200 joint_samples 45 [163767, 1047158]
processed_samples 15201 unjoint_samples 15200 joint_samples 45 [320241, 1046336]
processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1046664, 561031]
processed_samples 15201 unjoint_samples 15200 joint_samples 46 [1046784, 178833]
processed_samples 15201 unjoint_samples 15200 joint_samples 46 [1021611, 708477]
processed_samples 15201 unjoint_samples 15200 joint_samples 45 [835132, 1046664]
processed_samples 15201 unjoint_samples 15200 joint_samples 45 [750505, 1047449]

processed_samples 15300 unjoint_samples 15300 joint_samples 46 [1005660, 974126]
processed_samples 15301 unjoint_samples 15300 joint_samples 45 [598494, 1046336]
processed_samples 15300 unjoint_samples 15300 joint_samples 45 [518701, 1047158]
processed_samples 15301 unjoint_samples 15300 joint_samples 46 [1033667, 107395]
processed_samples 15301 unjoint_samples 15300 joint_samples 46 [70328, 1047449]
processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1046664, 836812]
processed_samples 15301 unjoint_samples 15300 joint_samples 46 [1046784, 558826]
processed_samples 15301 unjoint_samples 15300 joint_samples 46 [1021611, 1009680]

processed_samples 15401 unjoint_samples 15400 joint_samples 47 [193570, 1047950]
processed_samples 15400 unjoint_samples 15400 joint_samples 47 [169769, 1031938]
processed_samples 15400 unjoint_samples 15400 joint_samples 45 [997872, 1047158]
processed_samples 15400 unjoint_samples 15400 joint_samples 46 [181591, 1029143]
processed_samples 15401 unjoint_samples 15400 joint_samples 46 [1033667, 403022]
processed_samples 15401 unjoint_samples 15400 joint_samples 46 [463326, 1047449]
processed_samples 15401 unjoint_samples 15400 joint_samples 45 [901006, 1046336]
processed_samples 15401 unjoint_samples 15400 joint_samples 46 [1046784, 866678]

processed_samples 15500 unjoint_samples 15500 joint_samples 47 [513543, 1031938]
processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1036316, 299242]
processed_samples 15501 unjoint_samples 15500 joint_samples 46 [1012491, 377114]
processed_samples 15500 unjoint_samples 15500 joint_samples 46 [491016, 1029143]
processed_samples 15501 unjoint_samples 15500 joint_samples 47 [509907, 1047950]
processed_samples 15501 unjoint_samples 15500 joint_samples 46 [1033667, 694003]
processed_samples 15501 unjoint_samples 15500 joint_samples 47 [194752, 1037402]
processed_samples 15501 unjoint_samples 15500 joint_samples 46 [728515, 1047449]

processed_samples 15600 unjoint_samples 15600 joint_samples 47 [818903, 1031938]
processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1036316, 581816]
processed_samples 15601 unjoint_samples 15600 joint_samples 46 [1012491, 710446]
processed_samples 15601 unjoint_samples 15600 joint_samples 47 [907145, 1047950]
processed_samples 15601 unjoint_samples 15600 joint_samples 47 [495606, 1037402]
processed_samples 15601 unjoint_samples 15600 joint_samples 46 [1033667, 993369]
processed_samples 15600 unjoint_samples 15600 joint_samples 46 [928996, 1029143]
processed_samples 15601 unjoint_samples 15600 joint_samples 46 [989098, 1047449]

processed_samples 15700 unjoint_samples 15700 joint_samples 48 [1046266, 56903]
processed_samples 15701 unjoint_samples 15700 joint_samples 48 [1045184, 268611]
processed_samples 15700 unjoint_samples 15700 joint_samples 47 [230457, 1040630]
processed_samples 15701 unjoint_samples 15700 joint_samples 47 [229781, 1046348]
processed_samples 15701 unjoint_samples 15700 joint_samples 47 [712262, 1037402]
processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1036316, 930595]
processed_samples 15701 unjoint_samples 15700 joint_samples 47 [276068, 1047449]
processed_samples 15701 unjoint_samples 15700 joint_samples 46 [1012491, 977095]

processed_samples 15800 unjoint_samples 15800 joint_samples 48 [1046266, 493839]
processed_samples 15800 unjoint_samples 15800 joint_samples 47 [229570, 1045822]
processed_samples 15801 unjoint_samples 15800 joint_samples 47 [161244, 1045417]
processed_samples 15801 unjoint_samples 15800 joint_samples 47 [551758, 1046348]
processed_samples 15801 unjoint_samples 15800 joint_samples 48 [1045184, 654094]
processed_samples 15800 unjoint_samples 15800 joint_samples 47 [579059, 1040630]
processed_samples 15801 unjoint_samples 15800 joint_samples 47 [1045548, 1037793]
processed_samples 15801 unjoint_samples 15800 joint_samples 47 [553696, 1047449]

processed_samples 15900 unjoint_samples 15900 joint_samples 47 [515843, 1045822]
processed_samples 15901 unjoint_samples 15900 joint_samples 47 [408833, 1045417]
processed_samples 15901 unjoint_samples 15900 joint_samples 49 [76636, 1021802]
processed_samples 15901 unjoint_samples 15900 joint_samples 48 [387249, 1046604]
processed_samples 15901 unjoint_samples 15900 joint_samples 47 [923025, 1046348]
processed_samples 15900 unjoint_samples 15900 joint_samples 47 [854231, 1040630]
processed_samples 15900 unjoint_samples 15900 joint_samples 48 [1046266, 798319]
processed_samples 15901 unjoint_samples 15900 joint_samples 47 [858576, 1047449]
0x5612892544c0] mmco: unref short failure [h264 @ 0x5612892544c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure processed_samples 16000 unjoint_samples 16000 joint_samples 49 [275649, 1009056] processed_samples 16000 unjoint_samples 16000 joint_samples 48 [982972, 198594] processed_samples 16001 unjoint_samples 16000 joint_samples 47 [623406, 1045417] processed_samples 16000 unjoint_samples 16000 joint_samples 47 [872098, 1045822] processed_samples 16001 unjoint_samples 16000 joint_samples 49 [343529, 1021802] processed_samples 16001 unjoint_samples 16000 joint_samples 48 [1035518, 279019] processed_samples 16001 unjoint_samples 16000 joint_samples 48 [1046647, 85928] processed_samples 16000 unjoint_samples 16000 joint_samples 49 [275649, 1009056] processed_samples 16001 unjoint_samples 16000 joint_samples 48 [1035518, 279019] processed_samples 16000 unjoint_samples 16000 joint_samples 48 [982972, 198594] processed_samples 16001 unjoint_samples 16000 joint_samples 47 [623406, 1045417] processed_samples 16001 unjoint_samples 16000 joint_samples 49 [343529, 1021802] processed_samples 16000 unjoint_samples 16000 joint_samples 47 [872098, 1045822] processed_samples 16001 unjoint_samples 16000 joint_samples 48 [1046647, 85928] processed_samples 16001 unjoint_samples 16000 joint_samples 48 [695160, 1046604] [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure 
[h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure processed_samples 16001 unjoint_samples 16000 joint_samples 48 [695160, 1046604] [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x55e22b359480] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128803b9c0] 
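The recurring "[h264 @ 0x...] mmco: unref short failure" lines are libavcodec H.264 decoder warnings: a memory-management control operation asked the decoder to unreference a short-term reference picture it no longer holds, which typically happens when a clip is truncated or decoding starts away from an IDR frame, and is generally harmless for training. The "processed_samples ... unjoint_samples ... joint_samples ..." lines appear to be per-rank dataloader progress counters, and each record shows up twice in the raw stream. A minimal post-processing sketch follows, assuming only that a node log like this one has been saved as a plain-text file; the path, function name, and regular expressions are hypothetical and inferred from the lines in this log, not taken from any project tool. It deduplicates the progress records and tallies the decoder warnings:

import re
from collections import Counter

# Hypothetical log-summarizing helper; patterns inferred from this log's own lines.
PROGRESS_RE = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) "
    r"joint_samples (\d+) \[(\d+), (\d+)\]"
)
H264_RE = re.compile(r"\[h264 @ (0x[0-9a-f]+)\] mmco: unref short failure")

def summarize(path):
    text = open(path, encoding="utf-8", errors="replace").read()
    progress = {m.groups() for m in PROGRESS_RE.finditer(text)}     # drop exact duplicate records
    decoders = Counter(m.group(1) for m in H264_RE.finditer(text))  # warnings per decoder context
    latest = max((int(g[0]) for g in progress), default=0)
    print(f"unique progress records : {len(progress)}")
    print(f"latest processed_samples: {latest}")
    print(f"h264 'mmco: unref short failure' warnings: {sum(decoders.values())} "
          f"from {len(decoders)} decoder contexts")

if __name__ == "__main__":
    summarize("train_node.log")  # hypothetical filename; point at the saved node log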
processed_samples 16101 unjoint_samples 16100 joint_samples 49 [570778, 1021802]
processed_samples 16101 unjoint_samples 16100 joint_samples 49 [1033608, 61523]
processed_samples 16100 unjoint_samples 16100 joint_samples 48 [168069, 1045822]
processed_samples 16100 unjoint_samples 16100 joint_samples 49 [488828, 1009056]
processed_samples 16101 unjoint_samples 16100 joint_samples 48 [1046647, 517613]
processed_samples 16101 unjoint_samples 16100 joint_samples 47 [958141, 1045417]
processed_samples 16100 unjoint_samples 16100 joint_samples 48 [982972, 709935]
processed_samples 16101 unjoint_samples 16100 joint_samples 48 [1035518, 561748]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16201 unjoint_samples 16200 joint_samples 48 [506370, 1045417]
processed_samples 16201 unjoint_samples 16200 joint_samples 49 [870337, 1021802]
processed_samples 16200 unjoint_samples 16200 joint_samples 49 [744884, 1009056]
processed_samples 16201 unjoint_samples 16200 joint_samples 49 [1033608, 322158]
processed_samples 16201 unjoint_samples 16200 joint_samples 48 [1035518, 845078]
processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1023239, 995599]
processed_samples 16200 unjoint_samples 16200 joint_samples 48 [420289, 1045822]
processed_samples 16201 unjoint_samples 16200 joint_samples 48 [1046647, 860638]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16301 unjoint_samples 16300 joint_samples 50 [202684, 1043354]
processed_samples 16300 unjoint_samples 16300 joint_samples 49 [275893, 1047826]
processed_samples 16301 unjoint_samples 16300 joint_samples 49 [56261, 1048381]
processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1023161, 1036334]
processed_samples 16301 unjoint_samples 16300 joint_samples 49 [1033608, 670123]
processed_samples 16301 unjoint_samples 16300 joint_samples 49 [1046647, 129701]
processed_samples 16301 unjoint_samples 16300 joint_samples 48 [987633, 1045417]
processed_samples 16300 unjoint_samples 16300 joint_samples 48 [922531, 1045822]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16400 unjoint_samples 16400 joint_samples 50 [372719, 1045545]
processed_samples 16401 unjoint_samples 16400 joint_samples 49 [1047090, 282454]
processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1033907, 161540]
processed_samples 16401 unjoint_samples 16400 joint_samples 50 [550771, 1043354]
processed_samples 16400 unjoint_samples 16400 joint_samples 49 [576274, 1047826]
processed_samples 16401 unjoint_samples 16400 joint_samples 49 [311590, 1048381]
processed_samples 16401 unjoint_samples 16400 joint_samples 49 [1046647, 398605]
processed_samples 16401 unjoint_samples 16400 joint_samples 49 [1033608, 997243]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16500 unjoint_samples 16500 joint_samples 50 [689360, 1045545]
processed_samples 16501 unjoint_samples 16500 joint_samples 50 [242073, 1026619]
processed_samples 16501 unjoint_samples 16500 joint_samples 49 [589595, 1048381]
processed_samples 16501 unjoint_samples 16500 joint_samples 49 [1047090, 645750]
processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1033907, 534503]
processed_samples 16500 unjoint_samples 16500 joint_samples 49 [896887, 1047826]
processed_samples 16501 unjoint_samples 16500 joint_samples 49 [1046647, 668832]
processed_samples 16501 unjoint_samples 16500 joint_samples 50 [802897, 1043354]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16600 unjoint_samples 16600 joint_samples 50 [1044666, 92940]
processed_samples 16600 unjoint_samples 16600 joint_samples 50 [922065, 1045545]
processed_samples 16601 unjoint_samples 16600 joint_samples 51 [1040832, 76982]
processed_samples 16601 unjoint_samples 16600 joint_samples 50 [572479, 1026619]
processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1033907, 940192]
processed_samples 16601 unjoint_samples 16600 joint_samples 49 [893065, 1048381]
processed_samples 16601 unjoint_samples 16600 joint_samples 49 [1046647, 977168]
processed_samples 16601 unjoint_samples 16600 joint_samples 49 [1047090, 1027528]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16700 unjoint_samples 16700 joint_samples 50 [151079, 1048163]
processed_samples 16701 unjoint_samples 16700 joint_samples 50 [322707, 1045768]
processed_samples 16700 unjoint_samples 16700 joint_samples 51 [1017139, 257556]
processed_samples 16701 unjoint_samples 16700 joint_samples 50 [131351, 1048445]
processed_samples 16701 unjoint_samples 16700 joint_samples 51 [1040832, 327229]
processed_samples 16701 unjoint_samples 16700 joint_samples 50 [340224, 1027629]
processed_samples 16700 unjoint_samples 16700 joint_samples 50 [1044666, 399638]
processed_samples 16701 unjoint_samples 16700 joint_samples 50 [833381, 1026619]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16800 unjoint_samples 16800 joint_samples 51 [1017139, 581495]
processed_samples 16801 unjoint_samples 16800 joint_samples 51 [1046585, 15636]
processed_samples 16801 unjoint_samples 16800 joint_samples 50 [560197, 1045768]
processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1044666, 715393]
processed_samples 16801 unjoint_samples 16800 joint_samples 51 [1040832, 638022]
processed_samples 16800 unjoint_samples 16800 joint_samples 50 [490967, 1048163]
processed_samples 16801 unjoint_samples 16800 joint_samples 50 [679933, 1027629]
processed_samples 16801 unjoint_samples 16800 joint_samples 50 [454246, 1048445]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 16900 unjoint_samples 16900 joint_samples 51 [75287, 1048221]
processed_samples 16901 unjoint_samples 16900 joint_samples 51 [1046585, 476125]
processed_samples 16900 unjoint_samples 16900 joint_samples 51 [1017139, 836757]
processed_samples 16900 unjoint_samples 16900 joint_samples 50 [867599, 1048163]
processed_samples 16901 unjoint_samples 16900 joint_samples 51 [1047981, 1047806]
processed_samples 16901 unjoint_samples 16900 joint_samples 50 [983178, 1027629]
processed_samples 16901 unjoint_samples 16900 joint_samples 50 [827579, 1045768]
processed_samples 16901 unjoint_samples 16900 joint_samples 50 [782332, 1048445]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
processed_samples 17000 unjoint_samples 17000 joint_samples 52 [8891, 1045671]
processed_samples 17001 unjoint_samples 17000 joint_samples 52 [356470, 1047806]
processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1047131, 86676]
processed_samples 17001 unjoint_samples 17000 joint_samples 51 [1046935, 258063]
processed_samples 17001 unjoint_samples 17000 joint_samples 51 [1046920, 39856]
processed_samples 17000 unjoint_samples 17000 joint_samples 51 [353405, 1048221]
processed_samples 17001 unjoint_samples 17000 joint_samples 51 [1046585, 773236]
processed_samples 17001 unjoint_samples 17000 joint_samples 50 [1042746, 1048445]
[h264 decoder warnings omitted: repeated "mmco: unref short failure" messages]
[h264 @ 
0x561289adebc0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure processed_samples 17100 unjoint_samples 17100 joint_samples 52 [237073, 1045671] processed_samples 17100 unjoint_samples 17100 joint_samples 52 [237073, 1045671] [h264 @ 0x55e229bee340] mmco: unref short failure processed_samples 17101 unjoint_samples 17100 joint_samples 51 [1046920, 322434] processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1047131, 427635] processed_samples 17101 unjoint_samples 17100 joint_samples 52 [73095, 1014627] [h264 @ 0x561288fa6740] mmco: unref short failure processed_samples 17100 unjoint_samples 17100 joint_samples 51 [621694, 1048221] processed_samples 17101 unjoint_samples 17100 joint_samples 51 [1046935, 655254] processed_samples 17101 unjoint_samples 17100 joint_samples 52 [711178, 1047806] processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1047131, 427635] processed_samples 17101 unjoint_samples 17100 joint_samples 51 [1046920, 322434] processed_samples 17101 unjoint_samples 17100 joint_samples 51 [1046629, 289117] processed_samples 17100 unjoint_samples 17100 joint_samples 51 [621694, 1048221] processed_samples 17101 unjoint_samples 17100 joint_samples 52 [73095, 1014627] processed_samples 17101 unjoint_samples 17100 joint_samples 51 [1046935, 655254] processed_samples 17101 unjoint_samples 17100 joint_samples 51 [1046629, 289117] processed_samples 17101 unjoint_samples 17100 joint_samples 52 [711178, 1047806] [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561288b2c5c0] mmco: unref short failure [h264 @ 0x561288b2c5c0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 
0x56128f259140] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x56128a4dd440] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure processed_samples 17200 unjoint_samples 17200 joint_samples 52 [477794, 1045671] processed_samples 17200 unjoint_samples 17200 joint_samples 51 [1047131, 801577] [h264 @ 0x55e22961e8c0] mmco: unref short failure processed_samples 17201 unjoint_samples 17200 joint_samples 52 [384823, 1014627] [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure processed_samples 17201 unjoint_samples 17200 joint_samples 51 [1046935, 1008697] processed_samples 17201 unjoint_samples 17200 joint_samples 51 [1046920, 785415] processed_samples 17200 unjoint_samples 17200 joint_samples 51 [862147, 1048221] processed_samples 17202 unjoint_samples 17200 joint_samples 51 [1046629, 537112] processed_samples 17201 unjoint_samples 17200 joint_samples 52 [1031690, 1047806] processed_samples 17200 unjoint_samples 17200 joint_samples 52 [477794, 1045671] processed_samples 17201 
unjoint_samples 17200 joint_samples 52 [384823, 1014627] processed_samples 17200 unjoint_samples 17200 joint_samples 51 [1047131, 801577] processed_samples 17201 unjoint_samples 17200 joint_samples 51 [1046935, 1008697] [h264 @ 0x56128979f6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure processed_samples 17200 unjoint_samples 17200 joint_samples 51 [862147, 1048221] processed_samples 17201 unjoint_samples 17200 joint_samples 51 [1046920, 785415] [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure processed_samples 17202 unjoint_samples 17200 joint_samples 51 [1046629, 537112] processed_samples 17201 unjoint_samples 17200 joint_samples 52 [1031690, 1047806] [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 
0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22c87d140] mmco: unref short failure [h264 @ 0x55e22c87d140] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1047502, 63689] processed_samples 17300 unjoint_samples 17300 joint_samples 52 [64325, 1032816] processed_samples 17301 unjoint_samples 17300 joint_samples 53 [1046774, 337477] processed_samples 17301 unjoint_samples 17300 joint_samples 52 [1047455, 242276] processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1047502, 63689] processed_samples 17300 unjoint_samples 17300 joint_samples 52 [64325, 1032816] processed_samples 17301 unjoint_samples 17300 joint_samples 52 [1047455, 242276] processed_samples 17300 unjoint_samples 17300 joint_samples 52 [753323, 1045671] processed_samples 17301 unjoint_samples 17300 joint_samples 53 
[1046774, 337477] processed_samples 17300 unjoint_samples 17300 joint_samples 52 [753323, 1045671] processed_samples 17301 unjoint_samples 17300 joint_samples 51 [1046920, 1044993] processed_samples 17301 unjoint_samples 17300 joint_samples 52 [607562, 1014627] processed_samples 17302 unjoint_samples 17300 joint_samples 51 [1046629, 894991] processed_samples 17301 unjoint_samples 17300 joint_samples 52 [607562, 1014627] processed_samples 17301 unjoint_samples 17300 joint_samples 51 [1046920, 1044993] processed_samples 17302 unjoint_samples 17300 joint_samples 51 [1046629, 894991] [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561288b2c5c0] mmco: unref short failure [h264 @ 0x561288b2c5c0] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 
0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x55e229fe4680] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1047502, 325549] processed_samples 17400 unjoint_samples 17400 joint_samples 52 [448228, 1032816] processed_samples 17401 unjoint_samples 17400 joint_samples 52 [354522, 1044993] processed_samples 17401 unjoint_samples 17400 joint_samples 52 [354522, 1044993] processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1047502, 325549] processed_samples 17400 unjoint_samples 17400 joint_samples 52 [448228, 1032816] processed_samples 17400 unjoint_samples 17400 joint_samples 53 [1047303, 9305] processed_samples 17400 unjoint_samples 17400 joint_samples 53 [1047303, 9305] processed_samples 17401 unjoint_samples 17400 joint_samples 52 [911374, 1014627] processed_samples 17401 unjoint_samples 17400 joint_samples 52 [911374, 1014627] processed_samples 17401 unjoint_samples 17400 joint_samples 52 [1047455, 556311] processed_samples 17402 unjoint_samples 17400 joint_samples 52 [141044, 1025915] processed_samples 17402 unjoint_samples 17400 joint_samples 52 [141044, 1025915] [h264 @ 0x56128a101380] mmco: unref short failure processed_samples 17401 unjoint_samples 17400 joint_samples 52 [1047455, 556311] processed_samples 17401 unjoint_samples 17400 joint_samples 53 [1046774, 590839] processed_samples 17401 unjoint_samples 17400 joint_samples 53 [1046774, 590839] [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e228b87a80] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 
0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561288e7d800] mmco: unref short failure [h264 @ 0x561288e7d800] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128949f040] mmco: unref short failure [h264 @ 0x56128949f040] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e228bad300] mmco: unref short failure [h264 @ 0x55e228bad300] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1047502, 647049] processed_samples 17501 unjoint_samples 17500 joint_samples 52 [817069, 1044993] processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1047502, 647049] processed_samples 
17500 unjoint_samples 17500 joint_samples 52 [823916, 1032816] processed_samples 17500 unjoint_samples 17500 joint_samples 53 [1047303, 442843] processed_samples 17500 unjoint_samples 17500 joint_samples 53 [1047303, 442843] processed_samples 17500 unjoint_samples 17500 joint_samples 52 [823916, 1032816] processed_samples 17501 unjoint_samples 17500 joint_samples 53 [115728, 1045342] processed_samples 17501 unjoint_samples 17500 joint_samples 52 [817069, 1044993] processed_samples 17501 unjoint_samples 17500 joint_samples 52 [1047455, 931422] processed_samples 17501 unjoint_samples 17500 joint_samples 53 [115728, 1045342] processed_samples 17501 unjoint_samples 17500 joint_samples 53 [1046774, 820959] processed_samples 17502 unjoint_samples 17500 joint_samples 52 [449867, 1025915] processed_samples 17501 unjoint_samples 17500 joint_samples 52 [1047455, 931422] processed_samples 17501 unjoint_samples 17500 joint_samples 53 [1046774, 820959] processed_samples 17502 unjoint_samples 17500 joint_samples 52 [449867, 1025915] [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x561289b58e80] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x561288d5f900] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x55e2293f4500] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 
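The bracketed "[h264 @ 0x...] mmco: unref short failure" messages that flood this stretch of the log come from FFmpeg's libavcodec h264 decoder: an MMCO (memory management control operation) in the bitstream asked the decoder to unreference a short-term reference picture it no longer holds, which usually points at truncated or mildly corrupted video samples and is generally non-fatal for data loading. When the noise makes the progress lines hard to follow, a small offline filter can strip and count them. The following is a hypothetical post-processing sketch, not part of the training code; the log filename is a placeholder.

    # strip_h264_warnings.py - hypothetical helper, assumes a plain-text node log
    import re
    import sys

    # Matches the repeated libavcodec warning exactly as it appears in this log.
    WARN_RE = re.compile(r"\[h264 @ 0x[0-9a-f]+\] mmco: unref short failure\s*")

    def strip_decoder_warnings(path: str) -> None:
        kept, dropped = [], 0
        with open(path, "r", errors="replace") as fh:
            for line in fh:
                cleaned, n = WARN_RE.subn("", line)   # remove every warning on the line
                dropped += n
                if cleaned.strip():                   # keep lines that still carry content
                    kept.append(cleaned.strip())
        sys.stdout.write("\n".join(kept) + "\n")
        sys.stderr.write(f"stripped {dropped} h264 decoder warnings\n")

    if __name__ == "__main__":
        # Placeholder filename; pass the actual per-node log as the first argument.
        strip_decoder_warnings(sys.argv[1] if len(sys.argv) > 1 else "node_log.txt")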
0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure processed_samples 17600 unjoint_samples 17600 joint_samples 53 [39944, 1046940] processed_samples 17601 unjoint_samples 17600 joint_samples 54 [208610, 1039734] processed_samples 17601 unjoint_samples 17600 joint_samples 53 [1047455, 307105] processed_samples 17601 unjoint_samples 17600 joint_samples 53 [1045219, 86301] processed_samples 17601 unjoint_samples 17600 joint_samples 53 [379501, 1045342] processed_samples 17600 unjoint_samples 17600 joint_samples 53 [1047303, 725833] processed_samples 17602 unjoint_samples 17600 joint_samples 52 [808163, 1025915] [h264 @ 0x55e229effb00] mmco: unref short failure processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1047502, 916147] processed_samples 17600 unjoint_samples 17600 joint_samples 53 [39944, 1046940] processed_samples 17601 unjoint_samples 17600 joint_samples 54 [208610, 1039734] processed_samples 17601 unjoint_samples 17600 joint_samples 53 [1045219, 86301] processed_samples 17601 unjoint_samples 17600 joint_samples 53 [1047455, 307105] processed_samples 17600 unjoint_samples 17600 joint_samples 53 [1047303, 725833] processed_samples 17601 unjoint_samples 17600 joint_samples 53 [379501, 1045342] processed_samples 17602 unjoint_samples 17600 joint_samples 52 [808163, 1025915] [h264 @ 0x561289dbd1c0] mmco: unref short failure processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1047502, 916147] [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x55e229c65e00] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 
0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x5612898c5680] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x561288fbcec0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128b79b340] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e229710280] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure processed_samples 17700 unjoint_samples 17700 joint_samples 54 [110845, 1039083] processed_samples 17700 unjoint_samples 17700 joint_samples 54 [110845, 1039083] processed_samples 17701 unjoint_samples 17700 joint_samples 54 [517834, 1039734] processed_samples 17700 unjoint_samples 17700 joint_samples 53 [165936, 1028358] processed_samples 17701 unjoint_samples 17700 joint_samples 54 [517834, 1039734] 
processed_samples 17700 unjoint_samples 17700 joint_samples 53 [165936, 1028358] processed_samples 17701 unjoint_samples 17700 joint_samples 53 [1045219, 366004] processed_samples 17700 unjoint_samples 17700 joint_samples 53 [530895, 1046940] processed_samples 17700 unjoint_samples 17700 joint_samples 53 [530895, 1046940] processed_samples 17701 unjoint_samples 17700 joint_samples 53 [1045219, 366004] processed_samples 17702 unjoint_samples 17700 joint_samples 53 [1046777, 57974] processed_samples 17702 unjoint_samples 17700 joint_samples 53 [1046777, 57974] processed_samples 17701 unjoint_samples 17700 joint_samples 53 [961586, 1045342] processed_samples 17701 unjoint_samples 17700 joint_samples 53 [961586, 1045342] processed_samples 17701 unjoint_samples 17700 joint_samples 53 [1047455, 678385] processed_samples 17701 unjoint_samples 17700 joint_samples 53 [1047455, 678385] [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x56128c7a2700] mmco: unref short failure [h264 @ 0x56128c7a2700] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x56128f5368c0] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 
0x5612664cc640] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x561288a082c0] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x561287ffe7c0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure processed_samples 17800 unjoint_samples 17800 joint_samples 54 [498516, 1039083] processed_samples 17800 unjoint_samples 17800 joint_samples 53 [871595, 1046940] processed_samples 17800 unjoint_samples 17800 joint_samples 53 [441579, 1028358] processed_samples 17800 unjoint_samples 17800 joint_samples 54 [498516, 1039083] processed_samples 17800 unjoint_samples 17800 joint_samples 53 [871595, 1046940] processed_samples 17801 unjoint_samples 17800 joint_samples 54 [841566, 1039734] processed_samples 17801 unjoint_samples 17800 joint_samples 53 [1047455, 999088] processed_samples 17800 unjoint_samples 17800 joint_samples 53 [441579, 1028358] processed_samples 17801 unjoint_samples 17800 joint_samples 53 [1045219, 657182] [h264 @ 0x55e229e16240] mmco: unref short failure [h264 @ 0x55e229e16240] mmco: unref short failure processed_samples 17801 unjoint_samples 17800 joint_samples 54 [841566, 1039734] processed_samples 17802 unjoint_samples 17800 joint_samples 53 [1046777, 381224] processed_samples 17801 unjoint_samples 17800 joint_samples 53 [1045219, 657182] processed_samples 17801 unjoint_samples 17800 joint_samples 53 [1047455, 999088] [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure processed_samples 17802 unjoint_samples 17800 joint_samples 53 [1046777, 381224] processed_samples 17801 unjoint_samples 17800 joint_samples 54 [221954, 1045342] processed_samples 17801 unjoint_samples 17800 joint_samples 54 [221954, 1045342] [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 
0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x56128b7de900] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e22cc27500] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561287ddbd00] mmco: unref short failure [h264 @ 0x561287ddbd00] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e22a7b4340] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289214a40] mmco: unref short failure [h264 @ 0x561289214a40] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22945c980] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x56128a0c96c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x56128dae7c80] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x561289ff4fc0] mmco: unref short failure [h264 @ 
0x561289ff4fc0] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x56128a764040] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure [h264 @ 0x55e229ae21c0] mmco: unref short failure processed_samples 17900 unjoint_samples 17900 joint_samples 54 [163993, 1046940] processed_samples 17900 unjoint_samples 17900 joint_samples 53 [684655, 1028358] processed_samples 17900 unjoint_samples 17900 joint_samples 54 [760687, 1039083] processed_samples 17900 unjoint_samples 17900 joint_samples 54 [163993, 1046940] processed_samples 17900 unjoint_samples 17900 joint_samples 53 [684655, 1028358] processed_samples 17900 unjoint_samples 17900 joint_samples 54 [760687, 1039083] processed_samples 17901 unjoint_samples 17900 joint_samples 54 [9338, 1043133] processed_samples 17901 unjoint_samples 17900 joint_samples 54 [9338, 1043133] processed_samples 17901 unjoint_samples 17900 joint_samples 54 [311262, 1028447] processed_samples 17901 unjoint_samples 17900 joint_samples 54 [311262, 1028447] processed_samples 17901 unjoint_samples 17900 joint_samples 55 [203014, 1040338] processed_samples 17901 unjoint_samples 17900 joint_samples 55 [203014, 1040338] processed_samples 17901 unjoint_samples 17900 joint_samples 54 [531295, 1045342] processed_samples 17901 unjoint_samples 17900 joint_samples 54 [531295, 1045342] processed_samples 17902 unjoint_samples 17900 joint_samples 53 [1046777, 717340] processed_samples 17902 unjoint_samples 17900 joint_samples 53 [1046777, 717340] [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 
0x55e228c41e40] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x56128d994d40] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e228a6af80] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22b6bd4c0] mmco: unref short failure [h264 @ 0x56128ccd9340] mmco: unref short failure [h264 @ 0x55e228bad300] mmco: unref short failure [h264 @ 0x55e228bad300] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18000 unjoint_samples 18000 joint_samples 55 [1046646, 94406]
processed_samples 18001 unjoint_samples 18000 joint_samples 54 [262639, 1043133]
processed_samples 18000 unjoint_samples 18000 joint_samples 54 [561781, 1046940]
processed_samples 18001 unjoint_samples 18000 joint_samples 55 [552456, 1040338]
processed_samples 18001 unjoint_samples 18000 joint_samples 54 [982692, 1045342]
processed_samples 18000 unjoint_samples 18000 joint_samples 53 [955188, 1028358]
processed_samples 18001 unjoint_samples 18000 joint_samples 54 [603552, 1028447]
processed_samples 18002 unjoint_samples 18000 joint_samples 53 [1046777, 1012216]
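The processed_samples lines above are the sample-pipeline progress counters of this run, and in the raw output each counter tuple is emitted more than once, which makes the file hard to follow by eye. Below is a minimal, stdlib-only sketch for pulling the latest checkpoint out of such a log; the line format is copied from this log as-is, while the script itself (its name, and the idea of grouping by the processed_samples value) is only an illustration, not part of the training code.

import re
import sys
from collections import defaultdict

# Matches the progress counters printed in this log, e.g.
#   processed_samples 18000 unjoint_samples 18000 joint_samples 55 [1046646, 94406]
PATTERN = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) "
    r"joint_samples (\d+) \[(\d+), (\d+)\]"
)

def latest_checkpoint(path):
    # Group counter tuples by their processed_samples value; the set drops the
    # duplicated prints that appear in the raw log.
    by_step = defaultdict(set)
    with open(path, errors="replace") as fh:
        for line in fh:
            for m in PATTERN.finditer(line):
                by_step[int(m.group(1))].add(m.groups()[1:])
    step = max(by_step)
    return step, sorted(by_step[step])

if __name__ == "__main__":
    step, entries = latest_checkpoint(sys.argv[1])
    print(f"latest processed_samples: {step} ({len(entries)} distinct counters)")
    for unjoint, joint, a, b in entries:
        print(f"  unjoint_samples={unjoint} joint_samples={joint} [{a}, {b}]")

Run it as "python tail_progress.py <logfile>"; tail_progress.py is a placeholder name for the sketch above.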
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1041765, 161719]
processed_samples 18100 unjoint_samples 18100 joint_samples 55 [1046646, 424435]
processed_samples 18101 unjoint_samples 18100 joint_samples 55 [185759, 1045342]
processed_samples 18101 unjoint_samples 18100 joint_samples 55 [888693, 1040338]
processed_samples 18100 unjoint_samples 18100 joint_samples 54 [976438, 1046940]
processed_samples 18101 unjoint_samples 18100 joint_samples 54 [942362, 1028447]
processed_samples 18102 unjoint_samples 18100 joint_samples 54 [361171, 1031928]
processed_samples 18101 unjoint_samples 18100 joint_samples 54 [658791, 1043133]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18200 unjoint_samples 18200 joint_samples 55 [1046429, 242610]
processed_samples 18200 unjoint_samples 18200 joint_samples 55 [1046646, 706516]
processed_samples 18201 unjoint_samples 18200 joint_samples 56 [1035591, 239542]
processed_samples 18201 unjoint_samples 18200 joint_samples 55 [178405, 1035822]
processed_samples 18202 unjoint_samples 18200 joint_samples 54 [655392, 1031928]
processed_samples 18201 unjoint_samples 18200 joint_samples 54 [925307, 1043133]
processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1041765, 504283]
processed_samples 18201 unjoint_samples 18200 joint_samples 55 [503048, 1045342]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18301 unjoint_samples 18300 joint_samples 55 [181569, 1045442]
processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1046429, 541896]
processed_samples 18300 unjoint_samples 18300 joint_samples 54 [1041765, 796406]
processed_samples 18300 unjoint_samples 18300 joint_samples 56 [9971, 1042576]
processed_samples 18301 unjoint_samples 18300 joint_samples 56 [1035591, 652787]
processed_samples 18301 unjoint_samples 18300 joint_samples 55 [869276, 1045342]
processed_samples 18301 unjoint_samples 18300 joint_samples 55 [402721, 1035822]
processed_samples 18302 unjoint_samples 18300 joint_samples 54 [938660, 1031928]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18400 unjoint_samples 18400 joint_samples 56 [317573, 1042576]
processed_samples 18401 unjoint_samples 18400 joint_samples 56 [1046758, 152465]
processed_samples 18401 unjoint_samples 18400 joint_samples 56 [1035591, 937034]
processed_samples 18401 unjoint_samples 18400 joint_samples 55 [568183, 1045442]
processed_samples 18402 unjoint_samples 18400 joint_samples 55 [181066, 1046296]
processed_samples 18400 unjoint_samples 18400 joint_samples 54 [1041765, 1032314]
processed_samples 18401 unjoint_samples 18400 joint_samples 55 [654072, 1035822]
processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1046429, 911833]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18500 unjoint_samples 18500 joint_samples 55 [297419, 1045652]
processed_samples 18500 unjoint_samples 18500 joint_samples 56 [109102, 1047620]
processed_samples 18501 unjoint_samples 18500 joint_samples 57 [1037865, 170612]
processed_samples 18500 unjoint_samples 18500 joint_samples 56 [578375, 1042576]
processed_samples 18501 unjoint_samples 18500 joint_samples 55 [805237, 1045442]
processed_samples 18501 unjoint_samples 18500 joint_samples 56 [1046758, 403593]
processed_samples 18502 unjoint_samples 18500 joint_samples 55 [489402, 1046296]
processed_samples 18501 unjoint_samples 18500 joint_samples 55 [988222, 1035822]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18600 unjoint_samples 18600 joint_samples 56 [429932, 1047620]
processed_samples 18601 unjoint_samples 18600 joint_samples 56 [109735, 1045442]
processed_samples 18600 unjoint_samples 18600 joint_samples 56 [900768, 1042576]
processed_samples 18601 unjoint_samples 18600 joint_samples 57 [1037865, 441808]
processed_samples 18600 unjoint_samples 18600 joint_samples 55 [672802, 1045652]
processed_samples 18601 unjoint_samples 18600 joint_samples 56 [1046415, 349286]
processed_samples 18602 unjoint_samples 18600 joint_samples 55 [741064, 1046296]
processed_samples 18601 unjoint_samples 18600 joint_samples 56 [1046758, 871874]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18700 unjoint_samples 18700 joint_samples 57 [142816, 1046617]
processed_samples 18701 unjoint_samples 18700 joint_samples 57 [164233, 1026670]
processed_samples 18702 unjoint_samples 18700 joint_samples 56 [1041153, 151596]
processed_samples 18701 unjoint_samples 18700 joint_samples 56 [335977, 1045442]
processed_samples 18700 unjoint_samples 18700 joint_samples 55 [936033, 1045652]
processed_samples 18701 unjoint_samples 18700 joint_samples 57 [1037865, 769846]
processed_samples 18700 unjoint_samples 18700 joint_samples 56 [779280, 1047620]
processed_samples 18701 unjoint_samples 18700 joint_samples 56 [1046415, 572712]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18800 unjoint_samples 18800 joint_samples 56 [1047012, 153213]
processed_samples 18800 unjoint_samples 18800 joint_samples 57 [1043563, 132281]
processed_samples 18801 unjoint_samples 18800 joint_samples 58 [54525, 1042879]
processed_samples 18800 unjoint_samples 18800 joint_samples 57 [526132, 1046617]
processed_samples 18801 unjoint_samples 18800 joint_samples 57 [465455, 1026670]
processed_samples 18802 unjoint_samples 18800 joint_samples 56 [1041153, 488161]
processed_samples 18801 unjoint_samples 18800 joint_samples 56 [660408, 1045442]
processed_samples 18801 unjoint_samples 18800 joint_samples 56 [1046415, 881578]
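The "[h264 @ 0x…] mmco: unref short failure" lines condensed above are warnings from FFmpeg's H.264 decoder: a reference-picture bookkeeping (MMCO) operation could not be applied, which typically points at imperfect or truncated video input rather than at the training job itself, and the sample counters keep advancing regardless. If the clips are decoded through PyAV (an assumption; decord, torchvision.io or another FFmpeg wrapper would need its own equivalent knob), the decoder log level can be lowered once, as in this sketch:

# Sketch: quiet FFmpeg decoder chatter when videos are read through PyAV.
# Assumption: the data loader uses PyAV ("import av"); this is not taken from
# the training code, only an illustration of the general fix.
import av
import av.logging

# Keep genuine errors, drop per-frame warnings such as "mmco: unref short failure".
av.logging.set_level(av.logging.ERROR)

def first_frame_rgb(path):
    # Tiny usage example: decode one frame without the warning spam.
    with av.open(path) as container:
        for frame in container.decode(video=0):
            return frame.to_ndarray(format="rgb24")
    return None

Whether to silence these is a judgment call: leaving them visible at least records which decoder contexts are hitting imperfect streams.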
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 18901 unjoint_samples 18900 joint_samples 56 [988169, 1045442]
processed_samples 18900 unjoint_samples 18900 joint_samples 57 [1043563, 391244]
processed_samples 18900 unjoint_samples 18900 joint_samples 56 [1047012, 442590]
processed_samples 18901 unjoint_samples 18900 joint_samples 57 [1046415, 198018]
processed_samples 18900 unjoint_samples 18900 joint_samples 57 [969017, 1046617]
processed_samples 18901 unjoint_samples 18900 joint_samples 57 [751775, 1026670]
processed_samples 18901 unjoint_samples 18900 joint_samples 58 [362670, 1042879]
processed_samples 18902 unjoint_samples 18900 joint_samples 56 [1041153, 834713]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 19001 unjoint_samples 19000 joint_samples 57 [1043749, 327991]
processed_samples 19000 unjoint_samples 19000 joint_samples 57 [1043563, 698660]
processed_samples 19000 unjoint_samples 19000 joint_samples 58 [414755, 1046617]
processed_samples 19001 unjoint_samples 19000 joint_samples 57 [1029580, 1029466]
processed_samples 19002 unjoint_samples 19000 joint_samples 57 [170670, 1040847]
processed_samples 19001 unjoint_samples 19000 joint_samples 57 [1046415, 506690]
processed_samples 19001 unjoint_samples 19000 joint_samples 58 [581850, 1042879]
processed_samples 19000 unjoint_samples 19000 joint_samples 56 [1047012, 728443]
[h264 @ …] mmco: unref short failure    (repeated across many decoder contexts)
processed_samples 19100 unjoint_samples 19100 joint_samples 58 [674050, 1046617]
processed_samples 19100 unjoint_samples 19100 joint_samples 57 [1043563, 1041268]
processed_samples 19101 unjoint_samples 19100 joint_samples 57 [1046415, 858227]
processed_samples 19101 unjoint_samples 19100 joint_samples 58 [203565, 1044688]
processed_samples 19101 unjoint_samples 19100 joint_samples 57 [1043749, 594476]
processed_samples 19101 unjoint_samples 19100 joint_samples 58 [931942, 1042879]
processed_samples 19100 unjoint_samples 19100 joint_samples 56 [1047012, 997007]
processed_samples 19102 unjoint_samples 19100 joint_samples 57 [471433, 1040847]
0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x55e229cf1440] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x56128a89c640] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e22aa8b540] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure processed_samples 19201 unjoint_samples 19200 joint_samples 57 [1043749, 837558] processed_samples 19201 unjoint_samples 19200 joint_samples 59 [179098, 1047355] processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1047012, 224646] processed_samples 19200 unjoint_samples 19200 joint_samples 58 [1046288, 255052] processed_samples 19201 unjoint_samples 19200 joint_samples 58 [1046415, 151123] processed_samples 19200 unjoint_samples 19200 joint_samples 58 [971888, 1046617] [h264 @ 0x5612894f3a80] mmco: unref short failure [h264 @ 0x5612894f3a80] mmco: unref short failure processed_samples 19201 unjoint_samples 19200 joint_samples 58 [448347, 1044688] [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure processed_samples 19202 unjoint_samples 19200 joint_samples 57 [820830, 1040847] [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure processed_samples 19201 unjoint_samples 19200 joint_samples 57 [1043749, 837558] processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1047012, 224646] processed_samples 19200 unjoint_samples 19200 joint_samples 58 [1046288, 255052] processed_samples 19201 unjoint_samples 19200 joint_samples 58 [1046415, 151123] processed_samples 19201 unjoint_samples 19200 joint_samples 59 [179098, 1047355] processed_samples 19201 unjoint_samples 19200 joint_samples 58 [448347, 1044688] processed_samples 19200 unjoint_samples 19200 joint_samples 58 [971888, 1046617] processed_samples 19202 unjoint_samples 19200 joint_samples 57 [820830, 1040847] [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e229995600] mmco: unref short failure [h264 @ 
0x55e229995600] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x561287dca040] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e229f67f00] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x55e228f8a880] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x55e22a9483c0] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22a960b00] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x5612899c8a40] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 
0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x5612894a4440] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x56128f259140] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x55e22cffd980] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x56128cc7b5c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure [h264 @ 0x55e22962a900] mmco: unref short failure processed_samples 19300 unjoint_samples 19300 joint_samples 59 [147274, 1046617] processed_samples 19301 unjoint_samples 19300 joint_samples 58 [128393, 1007719] processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1046288, 489052] processed_samples 19301 unjoint_samples 19300 joint_samples 59 [455376, 1047355] processed_samples 19300 unjoint_samples 19300 joint_samples 57 [1047012, 542924] processed_samples 19302 unjoint_samples 19300 joint_samples 58 [62464, 1045154] processed_samples 19301 unjoint_samples 19300 joint_samples 58 [712876, 1044688] processed_samples 19300 unjoint_samples 19300 joint_samples 59 [147274, 1046617] processed_samples 19301 unjoint_samples 19300 joint_samples 58 [1046415, 449310] processed_samples 19301 unjoint_samples 19300 joint_samples 58 [128393, 1007719] processed_samples 19301 unjoint_samples 19300 joint_samples 59 [455376, 1047355] processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1046288, 489052] processed_samples 19300 unjoint_samples 19300 joint_samples 57 [1047012, 542924] processed_samples 19301 unjoint_samples 19300 joint_samples 58 [712876, 1044688] processed_samples 19302 unjoint_samples 19300 joint_samples 58 [62464, 1045154] processed_samples 19301 unjoint_samples 19300 joint_samples 58 [1046415, 449310] [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 
0x56128b6efc40] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612895b2700] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x55e231b440c0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128ca78940] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e229ee3840] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x56128cac3e80] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128a0be8c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x55e229d05800] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x55e229994e40] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x55e22d346ac0] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x5612664cc640] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x56128f1cad80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b30be80] mmco: unref short failure [h264 @ 0x55e22b6eaac0] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x5612897ffa00] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e2307d2300] mmco: unref short failure [h264 @ 0x56128a544b80] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 
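The "[h264 @ 0x...] mmco: unref short failure" lines above are WARNING-level messages from FFmpeg's h264 decoder, emitted while the data loader decodes video clips; they do not abort decoding. A minimal sketch for muting them, assuming the loader decodes video through PyAV (an assumption; the log only shows the raw FFmpeg messages):

    # minimal sketch, assuming PyAV is the video decoding backend (not confirmed by this log)
    import av.logging

    # raise the libav log threshold so WARNING-level chatter such as
    # "mmco: unref short failure" is suppressed while real errors still surface
    av.logging.set_level(av.logging.ERROR)

With the ffmpeg command-line tool the equivalent knob is "-loglevel error"; other decoding backends expose similar settings.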
[h264 @ 0x...] mmco: unref short failure   (repeated decoder warnings continue between the progress reports; collapsed)

processed_samples 19401 unjoint_samples 19400 joint_samples 59 [719074, 1047355]
processed_samples 19401 unjoint_samples 19400 joint_samples 59 [36792, 1047675]
processed_samples 19400 unjoint_samples 19400 joint_samples 59 [412405, 1046617]
processed_samples 19401 unjoint_samples 19400 joint_samples 58 [438528, 1007719]
processed_samples 19402 unjoint_samples 19400 joint_samples 58 [381908, 1045154]
processed_samples 19400 unjoint_samples 19400 joint_samples 57 [1047012, 886106]
processed_samples 19400 unjoint_samples 19400 joint_samples 58 [1046288, 768349]
processed_samples 19401 unjoint_samples 19400 joint_samples 58 [1046415, 778534]

processed_samples 19500 unjoint_samples 19500 joint_samples 59 [1046776, 25281]
processed_samples 19501 unjoint_samples 19500 joint_samples 60 [1008263, 69950]
processed_samples 19500 unjoint_samples 19500 joint_samples 58 [112370, 1046654]
processed_samples 19500 unjoint_samples 19500 joint_samples 59 [730039, 1046617]
processed_samples 19501 unjoint_samples 19500 joint_samples 59 [25250, 1045180]
processed_samples 19501 unjoint_samples 19500 joint_samples 58 [703000, 1007719]
processed_samples 19501 unjoint_samples 19500 joint_samples 59 [359198, 1047675]
processed_samples 19502 unjoint_samples 19500 joint_samples 58 [656578, 1045154]

processed_samples 19600 unjoint_samples 19600 joint_samples 60 [23692, 1046617]
processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1046776, 360576]
processed_samples 19601 unjoint_samples 19600 joint_samples 59 [800099, 1047675]
processed_samples 19600 unjoint_samples 19600 joint_samples 58 [399905, 1046654]
processed_samples 19601 unjoint_samples 19600 joint_samples 58 [980363, 1007719]
processed_samples 19601 unjoint_samples 19600 joint_samples 59 [369251, 1045180]
processed_samples 19601 unjoint_samples 19600 joint_samples 60 [1008263, 370986]
processed_samples 19602 unjoint_samples 19600 joint_samples 58 [975794, 1045154]
[h264 @ 0x...] mmco: unref short failure   (repeated decoder warnings continue between the progress reports; collapsed)
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x56128f2cbe00] stream 0, offset 0x90050f: partial file
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e22914b880] stream 0, offset 0x90050f: partial file

processed_samples 19700 unjoint_samples 19700 joint_samples 60 [334797, 1046617]
processed_samples 19700 unjoint_samples 19700 joint_samples 58 [694638, 1046654]
processed_samples 19700 unjoint_samples 19700 joint_samples 59 [1046776, 646431]
processed_samples 19701 unjoint_samples 19700 joint_samples 60 [1034363, 109519]
processed_samples 19702 unjoint_samples 19700 joint_samples 59 [1045546, 205593]
processed_samples 19701 unjoint_samples 19700 joint_samples 59 [652274, 1045180]
processed_samples 19702 unjoint_samples 19700 joint_samples 59 [352981, 1047250]
processed_samples 19701 unjoint_samples 19700 joint_samples 60 [1008263, 667214]

processed_samples 19800 unjoint_samples 19800 joint_samples 60 [624968, 1046617]
processed_samples 19801 unjoint_samples 19800 joint_samples 60 [1008263, 905354]
processed_samples 19800 unjoint_samples 19800 joint_samples 59 [1046776, 873137]
processed_samples 19801 unjoint_samples 19800 joint_samples 60 [1034363, 429452]
processed_samples 19802 unjoint_samples 19800 joint_samples 59 [609186, 1047250]
processed_samples 19802 unjoint_samples 19800 joint_samples 59 [1045546, 481285]
processed_samples 19801 unjoint_samples 19800 joint_samples 59 [1003487, 1045180]
processed_samples 19800 unjoint_samples 19800 joint_samples 58 [995000, 1046654]

[mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e228a2a840] stream 1, offset 0x14000d8: partial file
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x56128dbf4e00] stream 1, offset 0x14000d8: partial file

processed_samples 19900 unjoint_samples 19900 joint_samples 59 [243181, 1047136]
processed_samples 19901 unjoint_samples 19900 joint_samples 60 [1046776, 171992]
processed_samples 19901 unjoint_samples 19900 joint_samples 60 [1034363, 806038]
processed_samples 19901 unjoint_samples 19900 joint_samples 60 [312848, 1045180]
processed_samples 19901 unjoint_samples 19900 joint_samples 61 [179222, 1037164]
processed_samples 19902 unjoint_samples 19900 joint_samples 59 [1045546, 737122]
processed_samples 19900 unjoint_samples 19900 joint_samples 60 [1025187, 1046617]
processed_samples 19902 unjoint_samples 19900 joint_samples 59 [949990, 1047250]
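The "processed_samples ... unjoint_samples ... joint_samples ..." records above are the data-loader progress counters; each record is printed twice in the raw log. A minimal, self-contained sketch for pulling them out of a saved copy of this log and reporting the latest counter (the script and the log file name are placeholders, not part of the training run):

    #!/usr/bin/env python3
    # minimal sketch: summarize the "processed_samples ..." records in a saved log file
    import re
    import sys

    LINE_RE = re.compile(
        r"processed_samples (\d+) unjoint_samples (\d+) joint_samples (\d+) \[(\d+), (\d+)\]"
    )

    def summarize(path):
        seen = {}  # keyed by the full matched text, so doubled log lines count once
        with open(path, errors="replace") as fh:
            for raw in fh:
                for m in LINE_RE.finditer(raw):
                    seen[m.group(0)] = tuple(int(g) for g in m.groups())
        if not seen:
            print("no progress records found")
            return
        latest = max(rec[0] for rec in seen.values())
        print(f"{len(seen)} unique progress records, latest processed_samples = {latest}")

    if __name__ == "__main__":
        summarize(sys.argv[1] if len(sys.argv) > 1 else "training_log.txt")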
[h264 @ 0x...] mmco: unref short failure   (repeated decoder warnings continue between the progress reports; collapsed)

processed_samples 20000 unjoint_samples 20000 joint_samples 61 [1036450, 367169]
processed_samples 20000 unjoint_samples 20000 joint_samples 59 [483031, 1047136]
processed_samples 20001 unjoint_samples 20000 joint_samples 60 [601297, 1045180]
processed_samples 20001 unjoint_samples 20000 joint_samples 60 [1046776, 496337]
processed_samples 20001 unjoint_samples 20000 joint_samples 61 [1044668, 62680]
processed_samples 20001 unjoint_samples 20000 joint_samples 61 [439502, 1037164]
processed_samples 20002 unjoint_samples 20000 joint_samples 60 [1038969, 272317]
processed_samples 20002 unjoint_samples 20000 joint_samples 59 [1045546, 1020237]

processed_samples 20101 unjoint_samples 20100 joint_samples 61 [1044668, 293210]
processed_samples 20100 unjoint_samples 20100 joint_samples 61 [1036450, 696546]
processed_samples 20101 unjoint_samples 20100 joint_samples 61 [802088, 1037164]
processed_samples 20102 unjoint_samples 20100 joint_samples 60 [196195, 1046664]
processed_samples 20102 unjoint_samples 20100 joint_samples 60 [1038969, 601412]
processed_samples 20101 unjoint_samples 20100 joint_samples 60 [1046776, 734444]
processed_samples 20100 unjoint_samples 20100 joint_samples 59 [807162, 1047136]
processed_samples 20101 unjoint_samples 20100 joint_samples 60 [925398, 1045180]

processed_samples 20200 unjoint_samples 20200 joint_samples 60 [82092, 1047136]
processed_samples 20201 unjoint_samples 20200 joint_samples 62 [101372, 1047381]
processed_samples 20201 unjoint_samples 20200 joint_samples 61 [1044668, 696955]
processed_samples 20200 unjoint_samples 20200 joint_samples 61 [1036450, 926395]
processed_samples 20201 unjoint_samples 20200 joint_samples 61 [1040826, 153406]
processed_samples 20202 unjoint_samples 20200 joint_samples 60 [526765, 1046664]
processed_samples 20202 unjoint_samples 20200 joint_samples 60 [1038969, 944879]
processed_samples 20201 unjoint_samples 20200 joint_samples 60 [1046776, 1018228]
[h264 @ 0x55e228f8a880] co located POCs unavailable
[h264 @ 0x561289b5b1c0] co located POCs unavailable

processed_samples 20300 unjoint_samples 20300 joint_samples 62 [133253, 1046927]
processed_samples 20301 unjoint_samples 20300 joint_samples 62 [375846, 1047381]
processed_samples 20300 unjoint_samples 20300 joint_samples 60 [402031, 1047136]
processed_samples 20301 unjoint_samples 20300 joint_samples 61 [1040826, 507616]
processed_samples 20302 unjoint_samples 20300 joint_samples 61 [1046993, 188090]
processed_samples 20301 unjoint_samples 20300 joint_samples 61 [287856, 1026077]
processed_samples 20302 unjoint_samples 20300 joint_samples 60 [992682, 1046664]
processed_samples 20301 unjoint_samples 20300 joint_samples 61 [1044668, 989764]
processed_samples 20301
unjoint_samples 20300 joint_samples 61 [287856, 1026077] processed_samples 20302 unjoint_samples 20300 joint_samples 61 [1046993, 188090] processed_samples 20302 unjoint_samples 20300 joint_samples 60 [992682, 1046664] processed_samples 20301 unjoint_samples 20300 joint_samples 61 [1044668, 989764] [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x56128cd84c00] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22961e8c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128de45440] mmco: unref short failure [h264 @ 0x55e22aaa72c0] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x56128cd39380] mmco: unref short failure [h264 @ 0x55e229510940] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x5612890a5e80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22a9ea780] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e228d11500] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22944e2c0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x56128a51af00] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure 
[h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x55e2290ef580] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x561289fe1340] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure processed_samples 20400 unjoint_samples 20400 joint_samples 62 [472059, 1046927] processed_samples 20401 unjoint_samples 20400 joint_samples 61 [586063, 1026077] processed_samples 20401 unjoint_samples 20400 joint_samples 62 [1046948, 236184] [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x55e2299b4ec0] mmco: unref short failure [h264 @ 0x56128cbd5480] mmco: unref short failure processed_samples 20401 unjoint_samples 20400 joint_samples 62 [632590, 1047381] processed_samples 20402 unjoint_samples 20400 joint_samples 61 [265540, 1046664] processed_samples 20402 unjoint_samples 20400 joint_samples 61 [1046993, 472661] processed_samples 20400 unjoint_samples 20400 joint_samples 60 [730550, 1047136] [h264 @ 0x55e22bb7c400] mmco: unref short failure processed_samples 20401 unjoint_samples 20400 joint_samples 61 [1040826, 792465] processed_samples 20400 unjoint_samples 20400 joint_samples 62 [472059, 1046927] [h264 @ 0x561289dbaec0] mmco: unref short failure [h264 @ 0x561289dbaec0] mmco: unref short failure processed_samples 20401 unjoint_samples 20400 joint_samples 61 [586063, 1026077] processed_samples 20401 unjoint_samples 20400 joint_samples 62 [1046948, 236184] processed_samples 20401 unjoint_samples 20400 joint_samples 62 [632590, 1047381] processed_samples 20402 unjoint_samples 20400 joint_samples 61 [265540, 1046664] processed_samples 20402 unjoint_samples 20400 joint_samples 61 [1046993, 472661] processed_samples 20400 unjoint_samples 20400 joint_samples 60 [730550, 1047136] [h264 @ 0x561289fb1cc0] mmco: unref short failure processed_samples 20401 unjoint_samples 20400 joint_samples 61 [1040826, 792465] [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x55e22f2216c0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e22a54d380] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 
0x56128cd1c740] mmco: unref short failure [h264 @ 0x56128a054d40] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128cde2540] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x56128b1ad400] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x55e229bee340] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x55e229eee4c0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228b99ac0] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x56128942d500] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x56128803b9c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x55e229ba63c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x561289dbd1c0] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x5612915aad40] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x55e229d0e180] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1039574, 99069] processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1039574, 99069] processed_samples 20500 unjoint_samples 20500 joint_samples 63 [1045167, 16252] processed_samples 20500 unjoint_samples 20500 joint_samples 63 [1045167, 16252] [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure processed_samples 20501 unjoint_samples 20500 joint_samples 62 [1042950, 190730] processed_samples 20501 unjoint_samples 20500 joint_samples 62 [1042950, 190730] processed_samples 20502 unjoint_samples 20500 joint_samples 61 [586764, 1046664] processed_samples 20501 unjoint_samples 20500 joint_samples 61 [991113, 1026077] processed_samples 20502 unjoint_samples 20500 joint_samples 61 [586764, 1046664] processed_samples 20501 unjoint_samples 20500 joint_samples 61 [991113, 1026077] processed_samples 20501 unjoint_samples 20500 
joint_samples 62 [1046948, 536114] processed_samples 20501 unjoint_samples 20500 joint_samples 62 [1046948, 536114] processed_samples 20501 unjoint_samples 20500 joint_samples 62 [916998, 1047381] processed_samples 20501 unjoint_samples 20500 joint_samples 62 [916998, 1047381] processed_samples 20502 unjoint_samples 20500 joint_samples 61 [1046993, 796550] processed_samples 20502 unjoint_samples 20500 joint_samples 61 [1046993, 796550] [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e228c81180] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x56128c7596c0] mmco: unref short failure [h264 @ 0x561289adebc0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x55e22a2771c0] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x561289934100] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x55e22b3bf7c0] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x56128cd1c740] mmco: unref short failure [h264 @ 0x55e22bb40580] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x561289115f80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x561288e3e980] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x56128b6efc40] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561289dfadc0] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x56128900a440] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e22a926d00] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x56128dd01e00] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x56128e0636c0] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure processed_samples 20600 unjoint_samples 20600 joint_samples 63 [1045167, 305214] processed_samples 20601 unjoint_samples 20600 joint_samples 63 [1047248, 127136] processed_samples 20600 unjoint_samples 20600 joint_samples 61 [1039574, 344327] processed_samples 20601 unjoint_samples 20600 joint_samples 62 [1042950, 562392] processed_samples 20601 unjoint_samples 20600 joint_samples 62 [300779, 1028566] processed_samples 20602 unjoint_samples 20600 joint_samples 62 [82924, 1044363] processed_samples 20601 
unjoint_samples 20600 joint_samples 62 [1046948, 922459] [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure processed_samples 20602 unjoint_samples 20600 joint_samples 61 [854021, 1046664] processed_samples 20600 unjoint_samples 20600 joint_samples 63 [1045167, 305214] processed_samples 20601 unjoint_samples 20600 joint_samples 63 [1047248, 127136] processed_samples 20600 unjoint_samples 20600 joint_samples 61 [1039574, 344327] processed_samples 20601 unjoint_samples 20600 joint_samples 62 [1042950, 562392] processed_samples 20601 unjoint_samples 20600 joint_samples 62 [300779, 1028566] processed_samples 20602 unjoint_samples 20600 joint_samples 62 [82924, 1044363] processed_samples 20601 unjoint_samples 20600 joint_samples 62 [1046948, 922459] processed_samples 20602 unjoint_samples 20600 joint_samples 61 [854021, 1046664] [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22949b6c0] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e22a9e1b40] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x55e229e89400] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x56128c660f40] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x561288fff700] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x5612915b0b40] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x56128ab78440] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x55e22b5d3980] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x55e2294b6d80] mmco: unref short failure [h264 @ 0x561287ec2940] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x561289658800] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x55e22d087040] mmco: unref short failure [h264 @ 0x5612898b67c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e228c41e40] mmco: unref short failure [h264 @ 0x55e22d073ac0] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288faff40] mmco: unref short failure [h264 @ 0x561288d0b500] mmco: unref short failure [h264 @ 0x55e22d410bc0] mmco: unref short failure [h264 @ 
0x55e22d410bc0] mmco: unref short failure [h264 @ 0x55e230bee800] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c9a4700] mmco: unref short failure [h264 @ 0x56128c338ac0] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x55e230455400] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x56128884fb40] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x55e22f778500] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e22d05a140] mmco: unref short failure [h264 @ 0x56128c7a2700] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x55e229572500] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x5612895dd5c0] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x55e22b215b40] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x5612895ece80] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x55e22f798140] mmco: unref short failure [h264 @ 0x56128ab03c00] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure processed_samples 20700 unjoint_samples 20700 joint_samples 63 [1045167, 515328] processed_samples 20701 unjoint_samples 20700 joint_samples 63 [1046948, 115077] processed_samples 20702 unjoint_samples 20700 joint_samples 62 [76805, 1046664] processed_samples 20700 unjoint_samples 20700 joint_samples 63 [1045167, 515328] processed_samples 20701 unjoint_samples 20700 joint_samples 63 [1046948, 115077] processed_samples 20701 unjoint_samples 20700 joint_samples 62 [580311, 1028566] processed_samples 20702 unjoint_samples 20700 joint_samples 62 [393769, 1044363] processed_samples 20701 unjoint_samples 20700 joint_samples 63 [1047248, 475783] processed_samples 20701 unjoint_samples 20700 joint_samples 62 [580311, 1028566] processed_samples 20700 unjoint_samples 20700 joint_samples 61 [1039574, 775143] processed_samples 20701 unjoint_samples 20700 joint_samples 62 [1042950, 875859] processed_samples 20702 unjoint_samples 20700 joint_samples 62 [76805, 1046664] processed_samples 20701 unjoint_samples 20700 joint_samples 63 [1047248, 475783] processed_samples 20701 unjoint_samples 20700 joint_samples 62 [1042950, 875859] processed_samples 20702 unjoint_samples 20700 joint_samples 62 [393769, 1044363] processed_samples 20700 unjoint_samples 20700 joint_samples 61 [1039574, 775143] [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x561288ad8f80] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x561289dcd240] mmco: unref short failure [h264 @ 0x55e229effb00] mmco: unref short failure [h264 @ 
0x5612909bc600] mmco: unref short failure [h264 @ 0x55e22a4b04c0] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x56128d17f380] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x55e22aae1740] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x56128a101380] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x55e2292cbf40] mmco: unref short failure [h264 @ 0x561289fb1cc0] mmco: unref short failure [h264 @ 0x5612900c9100] mmco: unref short failure [h264 @ 0x55e229eadb40] mmco: unref short failure [mov,mp4,m4a,3gp,3g2,mj2 @ 0x561288bc5a40] stream 1, offset 0x1400a4d: partial file [h264 @ 0x55e228d11500] mmco: unref short failure [mov,mp4,m4a,3gp,3g2,mj2 @ 0x55e22de17800] stream 1, offset 0x1400a4d: partial file [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x5612909bc600] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x561289f0e680] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e22ad7b180] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e229b23540] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x55e22a5f6bc0] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561288fa6740] mmco: unref short failure [h264 @ 0x561289b5b1c0] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22c2ae040] mmco: unref short failure [h264 @ 0x55e22bb7c400] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x55e22d201e00] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x56128f3de880] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e22a9dd6c0] mmco: unref short failure [h264 @ 0x55e228b387c0] mmco: unref short failure [h264 @ 0x56129017c880] mmco: unref short failure [h264 @ 0x5612898c33c0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e22d0028c0] mmco: unref short failure [h264 @ 0x56128976f500] mmco: unref short failure [h264 @ 0x56128a751500] mmco: unref short failure [h264 @ 0x55e22b2b5b40] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x56128cd6ad00] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e22d04fac0] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x55e229fde280] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x56128a018cc0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x55e228becbc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short failure [h264 @ 0x5612888a6fc0] mmco: unref short 
[2024-12-01 22:44:27,575] torch.distributed.elastic.agent.server.api: [ERROR] Error waiting on exit barrier. Elapsed: 300.1078107357025 seconds
+ set +x
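Note on reading the counters above: each entry has the form "processed_samples <n> unjoint_samples <n> joint_samples <k> [<a>, <b>]". The following is a minimal log-scraping sketch (a hypothetical helper, not part of the training scripts; the script name, argument handling, and summary format are assumptions) that collects these counters from a node log and reports the most recent one:

#!/usr/bin/env python3
# Hypothetical helper (sketch): summarize "processed_samples ..." counters from a node log.
import re
import sys
from collections import Counter

# Matches entries such as:
#   processed_samples 20700 unjoint_samples 20700 joint_samples 63 [1045167, 515328]
PATTERN = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) joint_samples (\d+) \[(\d+), (\d+)\]"
)

def summarize(log_path: str) -> None:
    steps = Counter()   # how many entries were seen per processed_samples value
    latest = None       # entry with the highest processed_samples value
    with open(log_path, errors="replace") as fh:
        for line in fh:
            # Several entries can share one physical line in these logs, so scan with finditer.
            for m in PATTERN.finditer(line):
                processed, unjoint, joint, a, b = map(int, m.groups())
                steps[processed] += 1
                if latest is None or processed >= latest[0]:
                    latest = (processed, unjoint, joint, a, b)
    print(f"distinct processed_samples values: {len(steps)}")
    if latest is not None:
        print("latest entry: processed_samples %d unjoint_samples %d joint_samples %d [%d, %d]" % latest)

if __name__ == "__main__":
    summarize(sys.argv[1])

Run it against one of the per-node log files produced by this job; it only reads the text and does not touch checkpoints or training state.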