I'm hitting this TypeError when re-running tvmc.tune with the auto-scheduler on CUDA, passing the previous log as prior_records:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[11], line 8
3 log_file = 'cuda_tuning.json'
5 #tvmc.tune(model, target='cuda', enable_autoscheduler=True, tuning_records=log_file)
6
7 # tune again
----> 8 tvmc.tune(model, target='cuda', enable_autoscheduler=True, prior_records=log_file, tuning_records=log_file)
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/tvm/driver/tvmc/autotuner.py:623, in tune_model(tvmc_model, target, tuning_records, prior_records, enable_autoscheduler, rpc_key, hostname, port, trials, target_host, tuner, min_repeat_ms, early_stopping, timeout, repeat, number, parallel, hardware_params, include_simple_tasks, log_estimated_latency, additional_target_options, tasks_filter, desired_layout, desired_layout_ops, mixed_precision, mixed_precision_ops, mixed_precision_calculation_type, mixed_precision_acc_type)
620 logger.info(\"Autoscheduling with configuration: %s\", tuning_options)
622 # Schedule the tasks (i.e., produce a schedule for each task)
--> 623 schedule_tasks(tasks, weights, tuning_options, prior_records, log_estimated_latency)
624 else:
625 # In autotvm, trials is specified per task. We can convert the per-model input
626 # provided to per-task trials by dividing by the number of tasks.
627 trials = int(max(1, trials / max(len(tasks), 1)))
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/tvm/driver/tvmc/autotuner.py:772, in schedule_tasks(tasks, task_weights, tuning_options, prior_records, log_estimated_latency)
767 tuner = auto_scheduler.TaskScheduler(
768 tasks, task_weights, load_log_file=prior_records, callbacks=callbacks
769 )
771 # Tune the tasks
--> 772 tuner.tune(tuning_options)
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/tvm/auto_scheduler/task_scheduler.py:342, in TaskScheduler.tune(self, tune_option, search_policy, search_policy_params, adaptive_training, per_task_early_stopping)
339 self._restore_status(self.load_log_file, self.num_measures_per_round)
341 # make one search policy for one task
--> 342 self.search_policies = make_search_policies(
343 search_policy,
344 search_policy_params,
345 self.tasks,
346 self.num_measures_per_round,
347 tune_option.verbose,
348 self.load_model_file,
349 self.load_log_file,
350 adaptive_training,
351 )
353 # do a round robin first to warm up
354 for idx in range(len(self.tasks)):
355 # skip warming up this task if it has been tuned before (restored from the log file)
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/tvm/auto_scheduler/task_scheduler.py:99, in make_search_policies(search_policy, search_policy_params, tasks, num_measures_per_round, verbose, load_model_file, load_log_file, adaptive_training)
97 elif load_log_file:
98 logger.info(\"TaskScheduler: Reload measured states and train the model...\")
---> 99 cost_model.update_from_file(load_log_file)
100 elif model_type == \"random\":
101 cost_model = RandomModel()
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/tvm/auto_scheduler/cost_model/xgb_model.py:324, in XGBModel.update_from_file(self, file_name, n_lines)
322 inputs, results = RecordReader(file_name).read_lines(n_lines)
323 logger.info(\"XGBModel: Loaded %s measurement records from %s\", len(inputs), file_name)
--> 324 self.update(inputs, results)
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/tvm/auto_scheduler/cost_model/xgb_model.py:204, in XGBModel.update(self, inputs, results)
199 dtrain = pack_sum_xgbmatrix(
200 features, normalized_throughputs, task_ids, normalized_throughputs
201 )
203 # train xgb model
--> 204 self.bst = xgb.train(
205 self.xgb_params,
206 dtrain,
207 num_boost_round=10000,
208 obj=pack_sum_square_error,
209 callbacks=[
210 CustomCallback(
211 stopping_rounds=50,
212 metric=\"tr-p-rmse\",
213 fevals=[pack_sum_rmse, pack_sum_average_peak_score(self.plan_size)],
214 evals=[(dtrain, \"tr\")],
215 maximize=False,
216 verbose_eval=self.verbose_eval,
217 )
218 ],
219 )
221 # Update the model file if it has been set
222 if self.model_file:
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/xgboost/core.py:730, in require_keyword_args.<locals>.throw_if.<locals>.inner_f(*args, **kwargs)
728 for k, arg in zip(sig.parameters, args):
729 kwargs[k] = arg
--> 730 return func(**kwargs)
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/xgboost/training.py:167, in train(params, dtrain, num_boost_round, evals, obj, feval, maximize, early_stopping_rounds, evals_result, verbose_eval, xgb_model, callbacks, custom_metric)
165 if early_stopping_rounds:
166 callbacks.append(EarlyStopping(rounds=early_stopping_rounds, maximize=maximize))
--> 167 cb_container = CallbackContainer(
168 callbacks,
169 metric=metric_fn,
170 # For old `feval` parameter, the behavior is unchanged. For the new
171 # `custom_metric`, it will receive proper prediction result when custom objective
172 # is not used.
173 output_margin=callable(obj) or metric_fn is feval,
174 )
176 bst = cb_container.before_training(bst)
178 for i in range(start_iteration, num_boost_round):
File ~/miniconda3/envs/tvm-cu116-py310/lib/python3.10/site-packages/xgboost/callback.py:139, in CallbackContainer.__init__(self, callbacks, metric, output_margin, is_cv)
137 for cb in callbacks:
138 if not isinstance(cb, TrainingCallback):
--> 139 raise TypeError(\"callback must be an instance of `TrainingCallback`.\")
141 msg = (
142 \"metric must be callable object for monitoring. For builtin metrics\"
143 \", passing them in training parameter invokes monitor automatically.\"
144 )
145 if metric is not None and not callable(metric):
TypeError: callback must be an instance of `TrainingCallback`.
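For reference, the notebook cell is essentially the following (a sketch: the model is actually loaded earlier in the notebook, and the .onnx path here is just a placeholder):

from tvm.driver import tvmc

# model was loaded earlier in the notebook; the path below is only a placeholder
model = tvmc.load("model.onnx")

log_file = 'cuda_tuning.json'

# first tuning pass (commented out after it finished and wrote cuda_tuning.json)
# tvmc.tune(model, target='cuda', enable_autoscheduler=True, tuning_records=log_file)

# tune again, resuming from the previous records
tvmc.tune(model, target='cuda', enable_autoscheduler=True,
          prior_records=log_file, tuning_records=log_file)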
I had trouble building TVM from source with CUDA, so I'm using a .whl that I downloaded. Here is the environment:
(tvm-cu116-py310) nyck33@lenovo-gtx1650:/mnt/d/TVM/tvm/tvm-tutorials$ conda list
# packages in environment at /home/nyck33/miniconda3/envs/tvm-cu116-py310:
#
# Name Version Build Channel
_libgcc_mutex 0.1 main
_openmp_mutex 5.1 1_gnu
asttokens 2.4.1 pyhd8ed1ab_0 conda-forge
attrs 23.2.0 pypi_0 pypi
bzip2 1.0.8 h5eee18b_5
ca-certificates 2024.2.2 hbcca054_0 conda-forge
cloudpickle 3.0.0 pypi_0 pypi
comm 0.2.2 pyhd8ed1ab_0 conda-forge
debugpy 1.6.7 py310h6a678d5_0
decorator 5.1.1 pyhd8ed1ab_0 conda-forge
entrypoints 0.4 pyhd8ed1ab_0 conda-forge
exceptiongroup 1.2.0 pyhd8ed1ab_2 conda-forge
executing 2.0.1 pyhd8ed1ab_0 conda-forge
iniconfig 2.0.0 pypi_0 pypi
ipykernel 6.29.3 pyhd33586a_0 conda-forge
ipython 8.22.2 pyh707e725_0 conda-forge
jedi 0.19.1 pyhd8ed1ab_0 conda-forge
jupyter_client 7.3.4 pyhd8ed1ab_0 conda-forge
jupyter_core 5.7.2 py310hff52083_0 conda-forge
ld_impl_linux-64 2.38 h1181459_1
libffi 3.4.4 h6a678d5_0
libgcc-ng 11.2.0 h1234567_1
libgomp 11.2.0 h1234567_1
libsodium 1.0.18 h36c2ea0_1 conda-forge
libstdcxx-ng 11.2.0 h1234567_1
libuuid 1.41.5 h5eee18b_0
matplotlib-inline 0.1.6 pyhd8ed1ab_0 conda-forge
ml-dtypes 0.3.2 pypi_0 pypi
ncurses 6.4 h6a678d5_0
nest-asyncio 1.6.0 pyhd8ed1ab_0 conda-forge
numpy 1.26.4 pypi_0 pypi
onnx 1.16.0 pypi_0 pypi
onnxoptimizer 0.3.13 pypi_0 pypi
openssl 3.0.13 h7f8727e_0
packaging 24.0 pyhd8ed1ab_0 conda-forge
parso 0.8.3 pyhd8ed1ab_0 conda-forge
pexpect 4.9.0 pyhd8ed1ab_0 conda-forge
pickleshare 0.7.5 py_1003 conda-forge
pip 23.3.1 py310h06a4308_0
platformdirs 4.2.0 pyhd8ed1ab_0 conda-forge
pluggy 1.4.0 pypi_0 pypi
prompt-toolkit 3.0.42 pyha770c72_0 conda-forge
protobuf 5.26.1 pypi_0 pypi
psutil 5.9.8 pypi_0 pypi
ptyprocess 0.7.0 pyhd3deb0d_0 conda-forge
pure_eval 0.2.2 pyhd8ed1ab_0 conda-forge
pygments 2.17.2 pyhd8ed1ab_0 conda-forge
pytest 8.1.1 pypi_0 pypi
python 3.10.14 h955ad1f_0
python-dateutil 2.9.0 pyhd8ed1ab_0 conda-forge
python_abi 3.10 2_cp310 conda-forge
pyzmq 25.1.2 py310h6a678d5_0
readline 8.2 h5eee18b_0
scipy 1.12.0 pypi_0 pypi
setuptools 68.2.2 py310h06a4308_0
six 1.16.0 pyh6c4a22f_0 conda-forge
sqlite 3.41.2 h5eee18b_0
stack_data 0.6.2 pyhd8ed1ab_0 conda-forge
tk 8.6.12 h1ccaba5_0
tlcpack-nightly-cu116 0.15.dev118+g51bdaec6e pypi_0 pypi
tomli 2.0.1 pypi_0 pypi
tornado 6.4 pypi_0 pypi
traitlets 5.14.2 pyhd8ed1ab_0 conda-forge
typing_extensions 4.10.0 pyha770c72_0 conda-forge
tzdata 2024a h04d1e81_0
wcwidth 0.2.13 pyhd8ed1ab_0 conda-forge
wheel 0.41.2 py310h06a4308_0
xgboost 2.0.3 pypi_0 pypi
xz 5.4.6 h5eee18b_0
zeromq 4.3.5 h6a678d5_0
zlib 1.2.13 h5eee18b_0
(tvm-cu116-py310) nyck33@lenovo-gtx1650:/mnt/d/TVM/tvm/tvm-tutorials$
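The parts that look relevant are tlcpack-nightly-cu116 0.15.dev118 and xgboost 2.0.3. As a sanity check of what the notebook kernel itself imports (a minimal sketch, nothing tutorial-specific, just the standard version attributes and the CUDA device check):

# confirm which builds the kernel actually loads and whether the CUDA device is visible
import tvm
import xgboost

print("tvm:", tvm.__version__)
print("xgboost:", xgboost.__version__)
print("cuda device present:", tvm.cuda(0).exist)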
I see some related issues about this. Is there a different XGBoost version I should use? I'm going to try the 1.7.6 mentioned in those issues (i.e. pip install xgboost==1.7.6 in this env). Or should I install Optuna instead?
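For what it's worth, my reading of the bottom of the traceback is that xgboost 2.x's CallbackContainer rejects any callback that is not a TrainingCallback subclass, and TVM's CustomCallback apparently isn't one here. A minimal sketch (made-up data, no TVM involved) that I believe reproduces the same TypeError:

# reproduce the callback type check in plain xgboost 2.x with dummy data
import numpy as np
import xgboost as xgb

X = np.random.rand(32, 4)
y = np.random.rand(32)
dtrain = xgb.DMatrix(X, label=y)

class OldStyleCallback:
    """Stand-in for a pre-2.0 style callback that is not a TrainingCallback."""
    def __call__(self, env):
        pass

try:
    xgb.train({"objective": "reg:squarederror"}, dtrain,
              num_boost_round=2, callbacks=[OldStyleCallback()])
except TypeError as e:
    print(e)  # callback must be an instance of `TrainingCallback`.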