plexus.cli.shared.feedback_evaluation_runner module

class plexus.cli.shared.feedback_evaluation_runner.FeedbackRunnerRequest(scorecard: 'str', score: 'str', days: 'Optional[int]' = None, version: 'Optional[str]' = None, baseline: 'Optional[str]' = None, current_baseline: 'Optional[str]' = None, max_items: 'int' = 200, sampling_mode: 'str' = 'newest', sample_seed: 'Optional[int]' = None, max_category_summary_items: 'int' = 20, task_id: 'Optional[str]' = None, use_yaml: 'bool' = False)

Bases: object

__init__(scorecard: str, score: str, days: int | None = None, version: str | None = None, baseline: str | None = None, current_baseline: str | None = None, max_items: int = 200, sampling_mode: str = 'newest', sample_seed: int | None = None, max_category_summary_items: int = 20, task_id: str | None = None, use_yaml: bool = False) → None
baseline: str | None = None
current_baseline: str | None = None
days: int | None = None
max_category_summary_items: int = 20
max_items: int = 200
sample_seed: int | None = None
sampling_mode: str = 'newest'
score: str
scorecard: str
task_id: str | None = None
use_yaml: bool = False
version: str | None = None
plexus.cli.shared.feedback_evaluation_runner.build_feedback_command(*, plexus_bin: str, request: FeedbackRunnerRequest, resolved_task_id: str) → list[str]
plexus.cli.shared.feedback_evaluation_runner.build_feedback_run_summary(*, request: FeedbackRunnerRequest, evaluation_id: str, evaluation_info: Dict[str, Any], resolved_task_id: str) → Dict[str, Any]
plexus.cli.shared.feedback_evaluation_runner.ensure_feedback_runner_task(*, client: Any, account_id: str, scorecard: str, score: str, version: str | None, task_id: str | None) → str

Return a valid existing task ID, or create a new feedback task with stages.

plexus.cli.shared.feedback_evaluation_runner.find_feedback_evaluation_id_by_task_id(*, client: Any, account_id: str, task_id: str, max_pages: int = 20) → str | None
plexus.cli.shared.feedback_evaluation_runner.format_feedback_run_kanbus_comment(summary: Dict[str, Any]) → str
plexus.cli.shared.feedback_evaluation_runner.post_kanbus_comment(*, issue_id: str, comment: str) → None
plexus.cli.shared.feedback_evaluation_runner.run_feedback_evaluation_orchestrated(*, request: FeedbackRunnerRequest, client: Any, account_id: str, plexus_bin: str | None = None, creation_timeout_seconds: int = 180, completion_timeout_seconds: int = 7200, poll_interval_seconds: int = 5, kanbus_issue_id: str | None = None, on_evaluation_created: Callable[[str], None] | None = None) → Dict[str, Any]
plexus.cli.shared.feedback_evaluation_runner.wait_for_feedback_evaluation_id(*, client: Any, account_id: str, task_id: str, timeout_seconds: int = 180, poll_interval_seconds: int = 3, process: Popen | None = None) → str
plexus.cli.shared.feedback_evaluation_runner.wait_for_feedback_evaluation_terminal_status(*, evaluation_id: str, timeout_seconds: int = 7200, poll_interval_seconds: int = 5, process: Popen | None = None, stderr_log=None, stdout_log=None) → Dict[str, Any]