Data Labeling API . projects . datasets . evaluations

Instance Methods

exampleComparisons()

Returns the exampleComparisons Resource.

close()

Close httplib2 connections.

get(name, x__xgafv=None)

Gets an evaluation by resource name (to search, use projects.evaluations.search).

Method Details

close()
Close httplib2 connections.
get(name, x__xgafv=None)
Gets an evaluation by resource name (to search, use projects.evaluations.search).

Args:
  name: string, Required. Name of the evaluation. Format: "projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}" (required)
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # Describes an evaluation between a machine learning model's predictions and ground truth labels. Created when an EvaluationJob runs successfully.
  "annotationType": "A String", # Output only. Type of task that the model version being evaluated performs, as defined in the evaluationJobConfig.inputConfig.annotationType field of the evaluation job that created this evaluation.
  "config": { # Configuration details used for calculating evaluation metrics and creating an Evaluation. # Output only. Options used in the evaluation job that created this evaluation.
    "boundingBoxEvaluationOptions": { # Options regarding evaluation between bounding boxes. # Only specify this field if the related model performs image object detection (`IMAGE_BOUNDING_BOX_ANNOTATION`). Describes how to evaluate bounding boxes.
      "iouThreshold": 3.14, # Minimum [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) required for 2 bounding boxes to be considered a match. This must be a number between 0 and 1.
    },
  },
  "createTime": "A String", # Output only. Timestamp for when this evaluation was created.
  "evaluatedItemCount": "A String", # Output only. The number of items in the ground truth dataset that were used for this evaluation. Only populated when the evaluation is for certain AnnotationTypes.
  "evaluationJobRunTime": "A String", # Output only. Timestamp for when the evaluation job that created this evaluation ran.
  "evaluationMetrics": { # Output only. Metrics comparing predictions to ground truth labels.
    "classificationMetrics": { # Metrics calculated for a classification model.
      "confusionMatrix": { # Confusion matrix of the model running the classification. Only applicable when the metrics entry aggregates multiple labels. Not applicable when the entry is for a single label. # Confusion matrix of predicted labels vs. ground truth labels.
        "row": [
          { # A row in the confusion matrix. Each entry in this row has the same ground truth label.
            "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the ground truth label for this row.
              "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
              "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
              "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
            },
            "entries": [ # A list of the confusion matrix entries. One entry for each possible predicted label.
              {
                "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of a predicted label.
                  "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
                  "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
                  "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
                },
                "itemCount": 42, # Number of items predicted to have this label. (The ground truth label for these items is the `Row.annotationSpec` of this entry's parent.)
              },
            ],
          },
        ],
      },
      "prCurve": { # Precision-recall curve based on ground truth labels, predicted labels, and scores for the predicted labels.
        "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the label for which the precision-recall curve is calculated. If this field is empty, that means the precision-recall curve is an aggregate curve for all labels.
          "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
          "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
          "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
        },
        "areaUnderCurve": 3.14, # Area under the precision-recall curve. Not to be confused with area under a receiver operating characteristic (ROC) curve.
        "confidenceMetricsEntries": [ # Entries that make up the precision-recall graph. Each entry is a "point" on the graph drawn for a different `confidence_threshold`.
          {
            "confidenceThreshold": 3.14, # Threshold used for this entry. For classification tasks, this is a classification threshold: a predicted label is categorized as positive or negative (in the context of this point on the PR curve) based on whether the label's score meets this threshold. For image object detection (bounding box) tasks, this is the [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) threshold for the context of this point on the PR curve.
            "f1Score": 3.14, # Harmonic mean of recall and precision.
            "f1ScoreAt1": 3.14, # The harmonic mean of recall_at1 and precision_at1.
            "f1ScoreAt5": 3.14, # The harmonic mean of recall_at5 and precision_at5.
            "precision": 3.14, # Precision value.
            "precisionAt1": 3.14, # Precision value for entries with label that has highest score.
            "precisionAt5": 3.14, # Precision value for entries with label that has highest 5 scores.
            "recall": 3.14, # Recall value.
            "recallAt1": 3.14, # Recall value for entries with label that has highest score.
            "recallAt5": 3.14, # Recall value for entries with label that has highest 5 scores.
          },
        ],
        "meanAveragePrecision": 3.14, # Mean average precision of this curve.
      },
    },
    "objectDetectionMetrics": { # Metrics calculated for an image object detection (bounding box) model.
      "prCurve": { # Precision-recall curve.
        "annotationSpec": { # Container of information related to one possible annotation that can be used in a labeling task. For example, an image classification task where images are labeled as `dog` or `cat` must reference an AnnotationSpec for `dog` and an AnnotationSpec for `cat`. # The annotation spec of the label for which the precision-recall curve is calculated. If this field is empty, that means the precision-recall curve is an aggregate curve for all labels.
          "description": "A String", # Optional. User-provided description of the annotation specification. The description can be up to 10,000 characters long.
          "displayName": "A String", # Required. The display name of the AnnotationSpec. Maximum of 64 characters.
          "index": 42, # Output only. This is the integer index of the AnnotationSpec. The index for the whole AnnotationSpecSet is sequential starting from 0. For example, an AnnotationSpecSet with classes `dog` and `cat`, might contain one AnnotationSpec with `{ display_name: "dog", index: 0 }` and one AnnotationSpec with `{ display_name: "cat", index: 1 }`. This is especially useful for model training as it encodes the string labels into numeric values.
        },
        "areaUnderCurve": 3.14, # Area under the precision-recall curve. Not to be confused with area under a receiver operating characteristic (ROC) curve.
        "confidenceMetricsEntries": [ # Entries that make up the precision-recall graph. Each entry is a "point" on the graph drawn for a different `confidence_threshold`.
          {
            "confidenceThreshold": 3.14, # Threshold used for this entry. For classification tasks, this is a classification threshold: a predicted label is categorized as positive or negative (in the context of this point on the PR curve) based on whether the label's score meets this threshold. For image object detection (bounding box) tasks, this is the [intersection-over-union (IOU)](/vision/automl/object-detection/docs/evaluate#intersection-over-union) threshold for the context of this point on the PR curve.
            "f1Score": 3.14, # Harmonic mean of recall and precision.
            "f1ScoreAt1": 3.14, # The harmonic mean of recall_at1 and precision_at1.
            "f1ScoreAt5": 3.14, # The harmonic mean of recall_at5 and precision_at5.
            "precision": 3.14, # Precision value.
            "precisionAt1": 3.14, # Precision value for entries with label that has highest score.
            "precisionAt5": 3.14, # Precision value for entries with label that has highest 5 scores.
            "recall": 3.14, # Recall value.
            "recallAt1": 3.14, # Recall value for entries with label that has highest score.
            "recallAt5": 3.14, # Recall value for entries with label that has highest 5 scores.
          },
        ],
        "meanAveragePrecision": 3.14, # Mean average precision of this curve.
      },
    },
  },
  "name": "A String", # Output only. Resource name of an evaluation. The name has the following format: "projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}"
}