{
  "service": {
    "id": "bioimage-io/bioengine-worker-8498678846-rcvsx-nvd8kq1r:model-runner",
    "name": "Model Runner",
    "type": "bioengine-app",
    "description": "Runs and compares BioImage.IO models through the BioEngine service. It supports microscopy image inference, model search, and validation."
  },
  "endpoints": {
    "streamable_http": "/bioimage-io/mcp/model-runner/mcp",
    "sse": "/bioimage-io/mcp/model-runner/sse"
  },
  "capabilities": {
    "tools": [
      {
        "name": "get_model_documentation",
        "description": "Retrieve the documentation text for a bioimage.io model.\n\nReads the 'documentation' field from the model RDF. If the field is set,\ndownloads the referenced file from the artifact and returns its content.\n\nReturns:\n    The documentation file content as a string, or None if the\n    'documentation' field is absent/None or the file does not exist.",
        "inputSchema": {
          "properties": {
            "model_id": {
              "description": "Unique identifier of the bioimage.io model (e.g., 'ambitious-ant')",
              "type": "string"
            },
            "stage": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "Whether to fetch documentation from the staged version (True) or committed version (False)"
            }
          },
          "required": [
            "model_id"
          ],
          "type": "object"
        }
      },
      {
        "name": "get_model_rdf",
        "description": "Retrieve the Resource Description Framework (RDF) metadata for a bioimage.io model.\n\nReturns:\n    Dictionary containing the complete RDF metadata structure with nested\n    configuration for inputs, outputs, preprocessing, postprocessing, and model weights\n\nRaises:\n    ValueError: If model_id is invalid or model not found\n    RuntimeError: If download fails",
        "inputSchema": {
          "properties": {
            "model_id": {
              "description": "Unique identifier of the bioimage.io model (e.g., 'ambitious-ant')",
              "type": "string"
            },
            "stage": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "Whether to get RDF from the staged version of the model (True) or the committed version (False)"
            }
          },
          "required": [
            "model_id"
          ],
          "type": "object"
        }
      },
      {
        "name": "get_upload_url",
        "description": "Request a presigned upload URL for uploading an input image to temporary storage.\n\nCreates a unique temporary file in BioEngine S3 storage with a 1-hour TTL.\nUpload the file to the returned URL via an HTTP PUT request, then pass the\nreturned ``file_path`` as the ``inputs`` parameter of the ``infer`` endpoint.\n\nReturns:\n    Dictionary containing:\n    - upload_url: Presigned URL for uploading the file via HTTP PUT\n    - file_path: Unique temporary file path to reference the uploaded file\n\nExample::\n    import httpx, imageio.v3 as iio, io\n\n    result = await model_runner_service.get_upload_url(file_type=\".png\")\n    buf = io.BytesIO()\n    iio.imwrite(buf, image, extension=\".png\")\n    async with httpx.AsyncClient() as client:\n        await client.put(result[\"upload_url\"], content=buf.getvalue())\n    output = await model_runner_service.infer(model_id=\"...\", inputs=result[\"file_path\"])",
        "inputSchema": {
          "properties": {
            "file_type": {
              "description": "File type for the upload. Supported types: \".npy\" (NumPy array), \".png\" (PNG image), \".tiff\"/\".tif\" (TIFF image), \".jpeg\"/\".jpg\" (JPEG image)",
              "enum": [
                ".npy",
                ".png",
                ".tiff",
                ".tif",
                ".jpeg",
                ".jpg"
              ],
              "type": "string"
            }
          },
          "required": [
            "file_type"
          ],
          "type": "object"
        }
      },
      {
        "name": "infer",
        "description": "Execute inference on a bioimage.io model with provided input data.\n\nPerforms end-to-end inference including:\n- Automatic input preprocessing according to model specification\n- Model execution with optimized framework backend\n- Output postprocessing and format standardization\n- Memory-efficient processing for large inputs using tiling if supported\n\nReturns:\n    Dictionary mapping output names to inference results. By default each value is a\n    ``np.ndarray`` whose shape and data type match the model's output specification\n    (e.g. ``{\"output\": result_array}``). When ``return_download_url=True``, each value\n    is instead a presigned S3 download URL (``str``) pointing to the result serialised\n    as a ``.npy`` file; the URL is valid for 1 hour.\n\nRaises:\n    ValueError: If model_id is a URL (only model IDs allowed) or inputs don't match specification\n    FileNotFoundError: If a URL or temporary file path is provided but the resource does not exist or has expired\n    RuntimeError: If model loading, preprocessing, inference, or postprocessing fails\n\nNote:\n    Only published models from the bioimage.io model zoo are supported for inference.\n    This method delegates to the model_inference deployment for optimized execution.\n    String inputs are resolved via ``_load_image_from_source``: direct HTTP/HTTPS URLs are\n    fetched as-is; all other strings are treated as temporary S3 file paths and resolved\n    through BioEngine S3 storage. To upload large inputs, first call ``get_upload_url``\n    to obtain a presigned URL, upload the file, then pass the returned ``file_path`` as ``inputs``.",
        "inputSchema": {
          "properties": {
            "model_id": {
              "description": "Unique identifier of the published bioimage.io model",
              "type": "string"
            },
            "inputs": {
              "anyOf": [
                {
                  "type": "object"
                },
                {
                  "additionalProperties": {
                    "anyOf": [
                      {
                        "type": "object"
                      },
                      {
                        "type": "string"
                      }
                    ]
                  },
                  "type": "object"
                },
                {
                  "type": "string"
                }
              ],
              "description": "Input data as numpy array, dictionary mapping input names to arrays/strings, or a single string. Accepted string formats: a direct HTTP/HTTPS URL (fetched as-is) or a temporary file path returned by ``get_upload_url`` (resolved via S3 storage). Must match the model's input specification for shape and data type. For single-input models, provide a np.ndarray or a string. For multi-input models, provide a dict with input names as keys; each value may be a np.ndarray or a string."
            },
            "weights_format": {
              "anyOf": [
                {
                  "type": "string"
                },
                {
                  "type": "null"
                }
              ],
              "default": null,
              "description": "Preferred model weights format (\"pytorch_state_dict\", \"torchscript\", \"onnx\", \"tensorflow_saved_model\"). If None, automatically selects best available."
            },
            "device": {
              "anyOf": [
                {
                  "enum": [
                    "cuda",
                    "cpu"
                  ],
                  "type": "string"
                },
                {
                  "type": "null"
                }
              ],
              "default": null,
              "description": "Target computation device. \"cuda\" for GPU acceleration, \"cpu\" for CPU-only. If None, automatically selects based on availability and model compatibility."
            },
            "default_blocksize_parameter": {
              "anyOf": [
                {
                  "type": "integer"
                },
                {
                  "type": "null"
                }
              ],
              "default": null,
              "description": "Override default tiling block size for memory management. Larger values use more memory but may be faster. Only applicable for models supporting tiled inference."
            },
            "sample_id": {
              "anyOf": [
                {
                  "type": "string"
                },
                {
                  "type": "null"
                }
              ],
              "default": "sample",
              "description": "Identifier for this inference request, used for logging and debugging"
            },
            "skip_cache": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "Force re-download of model package before inference"
            },
            "return_download_url": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "If True, each array in the output will be saved to a temporary .npy file in S3 and the output value will be a presigned download URL (str) instead of the raw np.ndarray. The URL is valid for 1 hour."
            }
          },
          "required": [
            "model_id",
            "inputs"
          ],
          "type": "object"
        }
      },
      {
        "name": "search_models",
        "description": "Search for models in the bioimage.io collection.\n\nReturns a list of model identifiers with their descriptions that match the search query.",
        "inputSchema": {
          "properties": {
            "keywords": {
              "anyOf": [
                {
                  "items": {
                    "type": "string"
                  },
                  "type": "array"
                },
                {
                  "type": "null"
                }
              ],
              "default": null,
              "description": "List of keywords to filter models by (e.g., ['cell', 'nuclei', 'segmentation'])"
            },
            "limit": {
              "anyOf": [
                {
                  "type": "integer"
                },
                {
                  "type": "null"
                }
              ],
              "default": 10,
              "description": "Maximum number of models to return in the search results"
            },
            "ignore_checks": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "Whether to ignore bioengine inference checks and return all models (True) or only models that passed checks (False)"
            }
          },
          "type": "object"
        }
      },
      {
        "name": "test",
        "description": "Execute comprehensive model testing using the `bioimageio.core.test_model` test suite.\n\nCaching behavior:\n- Cached test reports are locally stored at ``<model_package>/.test_cache.json``.\n- Cached results are reused only when ``skip_cache=False`` AND the model\n    package has not changed (same ``latest_remote_modified``) AND the cached\n    ``test_report['env']`` versions for ``bioimageio.core`` and\n    ``bioimageio.spec`` match the currently installed versions.\n- ``skip_cache=True`` forces a complete model package re-download,\n    bypasses cached test results, and runs a fresh test.\n\nAdditional requirements:\n- ``additional_requirements`` are persisted in the cache metadata for\n    observability but are NOT part of automatic cache invalidation.\n    If you change them, use ``skip_cache=True`` to force re-testing.\n\nPublishing behavior:\n- If ``publish_test_report=True``, a compact ``test_summary`` entry is\n    written to the artifact manifest, ``test_report.json`` is uploaded,\n    and the artifact is committed.\n- If the artifact had an open staging version before publishing, staging is\n    re-opened after commit.",
        "inputSchema": {
          "properties": {
            "model_id": {
              "description": "Unique identifier of the bioimage.io model to test",
              "type": "string"
            },
            "stage": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "Whether to get the staged version of the model (True) or the committed version (False)"
            },
            "additional_requirements": {
              "anyOf": [
                {
                  "items": {
                    "type": "string"
                  },
                  "type": "array"
                },
                {
                  "type": "null"
                }
              ],
              "default": null,
              "description": "Extra Python packages to install in the test environment (e.g., [\"scipy>=1.7.0\", \"scikit-image\"])"
            },
            "skip_cache": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "Force a complete model package re-download and bypass cached test results before testing"
            },
            "publish_test_report": {
              "anyOf": [
                {
                  "type": "boolean"
                },
                {
                  "type": "null"
                }
              ],
              "default": false,
              "description": "Automatically publish the test report to the model artifact after testing"
            }
          },
          "required": [
            "model_id"
          ],
          "type": "object"
        }
      },
      {
        "name": "validate",
        "description": "Validate a model Resource Description Framework (RDF) against bioimage.io specifications.\n\nReturns:\n    Validation result containing:\n    - success: Boolean indicating overall validation status\n    - details: Detailed validation report with specific issues or confirmation\n\nNote:\n    This method performs format validation only (perform_io_checks=False).\n    File existence is not verified unless known_files mapping is provided.",
        "inputSchema": {
          "properties": {
            "rdf_dict": {
              "additionalProperties": {
                "anyOf": [
                  {
                    "type": "string"
                  },
                  {
                    "type": "integer"
                  },
                  {
                    "type": "number"
                  },
                  {
                    "items": {},
                    "type": "array"
                  },
                  {
                    "additionalProperties": true,
                    "type": "object"
                  }
                ]
              },
              "description": "Complete RDF dictionary structure to validate",
              "type": "object"
            },
            "known_files": {
              "anyOf": [
                {
                  "additionalProperties": {
                    "type": "string"
                  },
                  "type": "object"
                },
                {
                  "type": "null"
                }
              ],
              "default": null,
              "description": "Mapping of relative file paths to their content hashes for validating file references within the RDF"
            }
          },
          "required": [
            "rdf_dict"
          ],
          "type": "object"
        }
      },
      {
        "name": "get_load",
        "description": "Returns the current load of the BioEngine application service as a float value between 0 and 1.\nThis method is used by Hypha's load balancing system to distribute requests\nacross multiple service instances and for monitoring service capacity.\n\nLoad Calculation:\n- 0.0: No active requests, service is idle and ready to handle new requests\n- 1.0: Maximum capacity reached, all semaphore slots occupied\n- Values between 0 and 1 indicate partial load based on active request ratio\n\nThe load is calculated based on the number of active requests being processed\nthrough the service semaphore, which limits concurrent request processing to prevent overload.\n\nReturns:\n    float: Current service load between 0.0 (idle) and 1.0 (at capacity)",
        "inputSchema": {
          "properties": {},
          "required": [
            "context"
          ],
          "type": "object"
        }
      },
      {
        "name": "get_num_pcs",
        "description": "Returns the current number of active WebRTC peer connections for this BioEngine application.\nThis method is used for monitoring connection status, debugging WebRTC connectivity issues,\nand understanding real-time usage patterns of the application.\n\nWebRTC peer connections enable direct peer-to-peer communication between clients and the\nBioEngine application, bypassing traditional server-mediated communication for better\nperformance with large data transfers.\n\nConnection States Tracked:\n- Only counts connections in \"connected\" state\n- Excludes failed, closed, or disconnected connections\n- Updates automatically as connections are established or terminated\n\nReturns:\n    int: Number of currently active WebRTC peer connections (0 or positive integer)",
        "inputSchema": {
          "properties": {},
          "required": [
            "context"
          ],
          "type": "object"
        }
      },
      {
        "name": "get_rtc_service_id",
        "description": "Returns the registered WebRTC service identifier if the WebRTC service is successfully registered.\nThis method is used to verify WebRTC service availability and obtain the service ID for\ndirect peer-to-peer connections to the BioEngine application.\n\nWebRTC Service Registration:\n- WebRTC services are registered separately from the main WebSocket service\n- Service ID follows the pattern: \"{application_id}-rtc\"\n- Registration may fail due to network issues or server constraints\n- Main application remains functional even if WebRTC registration fails\n\nUse Cases:\n- Verify WebRTC capability before attempting peer-to-peer connections\n- Troubleshoot WebRTC service registration issues\n- Provide service discovery information to clients\n\nReturns:\n    Optional[str]: WebRTC service ID string if registered, None if registration failed or pending",
        "inputSchema": {
          "properties": {},
          "required": [
            "context"
          ],
          "type": "object"
        }
      }
    ],
    "resources": [],
    "prompts": []
  },
  "summary": {
    "service_id": "bioimage-io/bioengine-worker-8498678846-rcvsx-nvd8kq1r:model-runner",
    "service_type": "bioengine-app",
    "tools_count": 10,
    "resources_count": 0,
    "prompts_count": 0
  },
  "help": "Use the streamable HTTP endpoint for MCP communication or SSE for server-sent events."
}