Skip to content

Commit 1e77a1e

Browse files
authored
Merge branch 'main' into update-MIP-3D-version
2 parents 67794ff + de3b50d commit 1e77a1e

File tree

2 files changed

+4
-2
lines changed

2 files changed

+4
-2
lines changed

assets/evaluators/builtin/groundedness/evaluator/_groundedness.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,6 +193,7 @@ def __init__(self, model_config, *, threshold=3, credential=None, **kwargs):
193193
)
194194
self._model_config = model_config
195195
self.threshold = threshold
196+
self._credential = credential
196197
# Needs to be set because it's used in call method to re-validate prompt if `query` is provided
197198

198199
@overload
@@ -299,7 +300,8 @@ def _ensure_query_prompty_loaded(self):
299300
self._flow = AsyncPrompty.load(
300301
source=self._prompty_file,
301302
model=prompty_model_config,
302-
is_reasoning_model=self._is_reasoning_model)
303+
is_reasoning_model=self._is_reasoning_model,
304+
token_credential=self._credential)
303305

304306
def has_context(self, eval_input: dict) -> bool:
305307
"""

assets/evaluators/builtin/groundedness/spec.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
type: "evaluator"
22
name: "builtin.groundedness"
3-
version: 3
3+
version: 4
44
displayName: "Groundedness-Evaluator"
55
description: "Assesses whether the response stays true to the given context in a retrieval-augmented generation scenario. It’s best used for retrieval-augmented generation (RAG) scenarios, including question answering and summarization. Use the groundedness metric when you need to verify that AI-generated responses align with and are validated by the provided context."
66
evaluatorType: "builtin"

0 commit comments

Comments
 (0)