This message was deleted.
# ask-for-help
s
This message was deleted.
1
j
Could you share your custom runnable as well?
s
Hi @Jiang, sure. I have updated the service code to provide more information. Please see my runner definition below:
Copy code
# Fetch a reference to the latest saved model from the model store.
# NOTE(review): `xgboost` here is presumably the `bentoml.xgboost` framework
# module (it exposes `.get` and `.load_model`) — confirm the actual import.
model = xgboost.get("model:latest")


class MyRunnable(bentoml.Runnable):
    SUPPORTED_RESOURCES = ("cpu",)
    SUPPORTS_CPU_MULTI_THREADING = True

    def __init__(self):
        self._predictor = Predictor(model=xgboost.load_model(model))

    @bentoml.Runnable.method(batchable=True)
    def predict(self, input_df):
        predictions = self._predictor.predict(input_df)

        return self._build_result(predictions)

    def _build_result(self, predictions):
        prediction_results = []
        for _, row in predictions.iterrows():
            prediction_results.append(PredictionResult(rate=row.rate, ...))

        return prediction_results
This is resolved in this thread.