From 680513b7771a29f27cbbb3ffb009a69a913de6f9 Mon Sep 17 00:00:00 2001
From: YurenHao0426
Date: Tue, 27 Jan 2026 12:15:45 -0600
Subject: local reward model

---
 scripts/test_reward_cmp_15667024.err | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 scripts/test_reward_cmp_15667024.err

diff --git a/scripts/test_reward_cmp_15667024.err b/scripts/test_reward_cmp_15667024.err
new file mode 100644
index 0000000..3fc1f6a
--- /dev/null
+++ b/scripts/test_reward_cmp_15667024.err
@@ -0,0 +1,32 @@
+/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/transformers/utils/hub.py:110: FutureWarning: Using `TRANSFORMERS_CACHE` is deprecated and will be removed in v5 of Transformers. Use `HF_HOME` instead.
+  warnings.warn(
+`torch_dtype` is deprecated! Use `dtype` instead!
+Traceback (most recent call last):
+  File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 382, in <module>
+    main()
+  File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 378, in main
+    asyncio.run(run_comparison(args.local_model, args.device))
+  File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/runners.py", line 190, in run
+    return runner.run(main)
+           ^^^^^^^^^^^^^^^^
+  File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/runners.py", line 118, in run
+    return self._loop.run_until_complete(task)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/asyncio/base_events.py", line 654, in run_until_complete
+    return future.result()
+           ^^^^^^^^^^^^^^^
+  File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 283, in run_comparison
+    gpt_result, gpt_raw = await gpt_judge.judge(
+                          ^^^^^^^^^^^^^^^^^^^^^^
+  File "/projects/bfqt/users/yurenh2/ml-projects/personalization-user-model/scripts/test_reward_comparison.py", line 242, in judge
+    response = await self.client.chat.completions.create(
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/resources/chat/completions/completions.py", line 2678, in create
+    return await self._post(
+           ^^^^^^^^^^^^^^^^^
+  File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/_base_client.py", line 1797, in post
+    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/u/yurenh2/miniforge3/envs/eval/lib/python3.11/site-packages/openai/_base_client.py", line 1597, in request
+    raise self._make_status_error_from_response(err.response) from None
+openai.BadRequestError: Error code: 400 - {'error': {'message': "Unsupported parameter: 'max_tokens' is not supported with this model. Use 'max_completion_tokens' instead.", 'type': 'invalid_request_error', 'param': 'max_tokens', 'code': 'unsupported_parameter'}}
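
The traceback ends in an OpenAI 400 error: the target model rejects the legacy `max_tokens` parameter and expects `max_completion_tokens` in the `chat.completions.create` call. Below is a minimal sketch of the fix at the failing call site. The `GPTJudge` class, model name, and token budget are illustrative assumptions (test_reward_comparison.py itself is not part of this diff); only the parameter swap is taken directly from the error message.

    import asyncio

    from openai import AsyncOpenAI


    class GPTJudge:
        """Illustrative stand-in for the judge class in test_reward_comparison.py."""

        def __init__(self, model: str = "gpt-5") -> None:  # model name is an assumption
            self.model = model
            self.client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

        async def judge(self, prompt: str) -> str:
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": prompt}],
                # The 400 in the log comes from passing `max_tokens` to a model
                # that rejects it; `max_completion_tokens` is the replacement
                # the API error message itself recommends.
                max_completion_tokens=512,  # arbitrary illustrative budget
            )
            return response.choices[0].message.content or ""


    async def main() -> None:
        judge = GPTJudge()
        print(await judge.judge("Which response better matches the user profile, A or B?"))


    if __name__ == "__main__":
        asyncio.run(main())

If the script needs to keep supporting older models that still take `max_tokens`, the parameter could be chosen per model, but for the model in this log `max_completion_tokens` is the accepted form.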