Python detect labels
15 Python code examples related to "detect labels" are shown below.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
Example 1
Source File: detect.py From python-docs-samples with Apache License 2.0 | 6 votes |
def detect_labels_uri(uri):
    """Detects labels in the file located in Google Cloud Storage or on the Web."""
    from google.cloud import vision

    annotator = vision.ImageAnnotatorClient()
    remote_image = vision.types.Image()
    remote_image.source.image_uri = uri

    result = annotator.label_detection(image=remote_image)

    print('Labels:')
    for annotation in result.label_annotations:
        print(annotation.description)

    # Surface any API-level error as an exception with a docs pointer.
    if result.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                result.error.message))
    # [END vision_label_detection_gcs]
    # [START vision_landmark_detection]
Example 2
Source File: image_labels.py From professional-services with Apache License 2.0 | 6 votes |
def detect_labels_uri(uri):
    """Return a comma-separated string of labels detected in an image.

    Detects labels in the image file located in Google Cloud Storage or on
    the Web. Returns an empty string if not passed a valid image file.

    Args:
        uri: a string link to a photo in GCS or on the web.

    (Adapted from:
    https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/vision/cloud-client/detect/detect.py)
    """
    # Initialize the Cloud Vision client and point an Image at the URI.
    annotator = vision.ImageAnnotatorClient()
    target = types.Image()
    target.source.image_uri = uri

    # One API call; join the returned label descriptions.
    annotations = annotator.label_detection(image=target).label_annotations
    return ', '.join(annotation.description for annotation in annotations)
Example 3
Source File: main.py From serverless-store-demo with Apache License 2.0 | 6 votes |
def detect_labels(data, context):
    """Label a product image with the Vision API and store the top labels in Firestore."""
    if 'data' in data:
        # Pub/Sub payloads arrive base64-encoded; decode then parse JSON.
        payload = json.loads(base64.b64decode(data.get('data')).decode())
        event_context = payload.get('event_context')
        product_id = event_context.get('product_id')
        product_image = event_context.get('product_image')

        # Annotate the image stored in GCS.
        target = vision.types.Image()
        target.source.image_uri = 'gs://{}/{}.png'.format(GCS_BUCKET, product_image)
        annotations = vision_client.label_detection(image=target).label_annotations

        # Keep only the three highest-ranked labels.
        top_labels = [annotation.description for annotation in annotations[:3]]

        doc_ref = firestore_client.collection('products').document(product_id)
        product_data = doc_ref.get().to_dict()
        product_data['labels'] = top_labels
        doc_ref.set(product_data)
    return ''
Example 4
Source File: visionclient.py From galaxy-sdk-python with Apache License 2.0 | 5 votes |
def detect_labels(self, *args, **kwargs):
    """Detect labels in the image carried by a DetectLabelsRequest.

    The interface form hasn't been decided yet, so the common
    ``*args``/``**kwargs`` form is used.

    :param args: args[0] must be a DetectLabelsRequest object
    :param kwargs: dict, temporarily unused, reserved for future use
    :return: None -- NOTE(review): the DetectLabelsResult built below is
        never returned, so callers currently always receive None. Confirm
        whether a ``return result`` is missing.
    """
    # NOTE(review): the message says "ImageDetectRequest" but the check is
    # for DetectLabelsRequest -- the message looks stale.
    if not isinstance(args[0], DetectLabelsRequest):
        raise TypeError("The first argument must be a ImageDetectRequest Object!")
    # here temporary use deepcopy avoid image content be changed
    # (the caller's image bytes would otherwise be mutated by the
    # base64 encoding below)
    detect_labels_request = copy.deepcopy(args[0])
    # Wrap the request in the generic ImageDetectRequest envelope.
    image_detect_request = ImageDetectRequest()
    image_detect_request.set_detect_labels_request(detect_labels_request)
    if image_detect_request.detectLabelsRequest is not None \
            and image_detect_request.detectLabelsRequest.image is not None:
        image = image_detect_request.detectLabelsRequest.image
        self.__check_parameter(image)
        # Inline image bytes must be base64-encoded before transport.
        if image.content is not None:
            image_detect_request.detectLabelsRequest.image.content = \
                utils.base64_encode(image_detect_request.detectLabelsRequest.image.content)
    # Serialize the request body and build the signed auth headers.
    params = utils.obj2json(image_detect_request)
    headers = utils.auth_headers(self.__method, self.__uri, self.__set_headers(),
                                 self.__credential)
    http_conf = {"method": self.__method,
                 "host": self.__host,
                 "port": self.__port,
                 "resource": self.IMAGE_DETECT_RESOURCE,
                 "timeout": configs.DEFAULT_CLIENT_TIMEOUT}
    response = httpclient.execute_http_request(http_conf, params, headers)
    try:
        result = self.__result2obj(response)
        if result is None:
            raise VisionException(errMsg="error is occurred, the response is none!")
    # NOTE: Python 2 except/print syntax -- this module is Python 2 only.
    # NOTE(review): the exception is swallowed after printing and
    # ``result`` is discarded; likely unintended.
    except VisionException, ve:
        print ve
Example 5
Source File: main.py From solutions-vision-search with Apache License 2.0 | 5 votes |
def detect_automl_labels(bucket_id, object_id):
    """Detects labels from image using AutoML Vision."""
    try:
        # Read the image bytes out of GCS and base64-encode them.
        gcs_file = cloudstorage.open('/{}/{}'.format(bucket_id, object_id))
        image_b64 = base64.b64encode(gcs_file.read())
        gcs_file.close()

        # Payload for the AutoML Vision predict call.
        prediction_body = {
            'payload': {
                'image': {
                    'imageBytes': image_b64
                }
            },
            'params': {
                'score_threshold': "0.5"
            }
        }

        # Fully-qualified model resource name for this app's AutoML model.
        model_name = 'projects/{}/locations/us-central1/models/{}'.format(
            app_identity.get_application_id(),
            current_app.config['AUTOML_MODEL_ID'])

        prediction = get_automl_svc().projects().locations().models().predict(
            name=model_name, body=prediction_body).execute()
        return prediction['payload']
    except DeadlineExceededError:
        logging.exception('Exceeded deadline in detect_automl_labels()')
Example 6
Source File: main.py From solutions-vision-search with Apache License 2.0 | 5 votes |
def detect_labels(bucket_id, object_id):
    """Detects labels from uploaded image using Vision API."""
    try:
        # Annotation request pointing at the uploaded GCS object.
        annotate_body = [{
            'image': {
                'source': {
                    'gcsImageUri': 'gs://{}/{}'.format(bucket_id, object_id)
                }
            },
            'features': [{
                'type': 'LABEL_DETECTION',
                'maxResults': 10,
            }]
        }]

        api_response = get_vision_svc().images().annotate(
            body={'requests': annotate_body}).execute()

        # 'labelAnnotations' is absent when no labels were found.
        return api_response['responses'][0].get('labelAnnotations', [])
    except DeadlineExceededError:
        logging.exception('Exceeded deadline in detect_labels()')
Example 7
Source File: detect.py From python-docs-samples with Apache License 2.0 | 5 votes |
def detect_labels(path):
    """Detects labels in the file."""
    from google.cloud import vision
    import io

    annotator = vision.ImageAnnotatorClient()

    # [START vision_python_migration_label_detection]
    with io.open(path, 'rb') as image_file:
        file_bytes = image_file.read()
    local_image = vision.types.Image(content=file_bytes)

    result = annotator.label_detection(image=local_image)

    print('Labels:')
    for annotation in result.label_annotations:
        print(annotation.description)

    # Surface any API-level error as an exception with a docs pointer.
    if result.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                result.error.message))
    # [END vision_python_migration_label_detection]
# [END vision_label_detection]
# [START vision_label_detection_gcs]
Example 8
Source File: start_content_moderation.py From aws-media-insights-engine with Apache License 2.0 | 5 votes |
def detect_moderation_labels(bucket, key):
    """Run Rekognition content moderation on an S3 image and return the raw response."""
    client = boto3.client('rekognition')
    s3_image = {'S3Object': {'Bucket': bucket, 'Name': key}}
    try:
        result = client.detect_moderation_labels(Image=s3_image)
    except Exception as err:
        # Record the failure on the workflow object before aborting.
        output_object.update_workflow_status("Error")
        output_object.add_workflow_metadata(ContentModerationError=str(err))
        raise MasExecutionError(output_object.return_output_object())
    return result
# Detect explicit or suggestive adult content in a video
Example 9
Source File: start_label_detection.py From aws-media-insights-engine with Apache License 2.0 | 5 votes |
def detect_labels(bucket, key):
    """Run Rekognition label detection on an S3 image and return the raw response."""
    s3_image = {'S3Object': {'Bucket': bucket, 'Name': key}}
    try:
        result = rek.detect_labels(Image=s3_image)
    except Exception as err:
        # Record the failure on the workflow object before aborting.
        output_object.update_workflow_status("Error")
        output_object.add_workflow_metadata(LabelDetectionError=str(err))
        raise MasExecutionError(output_object.return_output_object())
    return result
# Recognizes labels in a video
Example 10
Source File: app.py From chalice-workshop with Apache License 2.0 | 5 votes |
def detect_labels_on_image(event, context):
    """Label an uploaded image with Rekognition and record it in the media DB."""
    key = event['Key']
    labels = get_rekognition_client().get_image_labels(
        bucket=event['Bucket'], key=key)
    get_media_db().add_media_file(key, media_type=db.IMAGE_TYPE, labels=labels)
Example 11
Source File: compareFaces.py From aws-developer-workshop with Apache License 2.0 | 5 votes |
def detect_labels(bucket, key):
    """Return the raw Rekognition label-detection response for an S3 object."""
    s3_image = {"S3Object": {"Bucket": bucket, "Name": key}}
    result = rekognition.detect_labels(Image=s3_image)
    # Sample code to write response to DynamoDB table 'MyTable' with 'PK' as Primary Key.
    # Note: role used for executing this Lambda function should have write access to the table.
    # table = boto3.resource('dynamodb').Table('MyTable')
    # labels = [{'Confidence': Decimal(str(label_prediction['Confidence'])), 'Name': label_prediction['Name']} for label_prediction in response['Labels']]
    # table.put_item(Item={'PK': key, 'Labels': labels})
    return result
# Not used in the workshop - here for future reference
Example 12
Source File: Cerebro_ProcessImage.py From aws-builders-fair-projects with Apache License 2.0 | 5 votes |
def detect_labels(bucket, key, profile_name=""):
    """Detect labels for an S3 image with Rekognition and log them to DynamoDB.

    Args:
        bucket: S3 bucket holding the image.
        key: S3 object key of the image.
        profile_name: optional profile tag to attach to the DynamoDB record.

    Returns:
        dict with "Labels" (extracted label names) and "ExternalImageId".
    """
    response = rekognition.detect_labels(Image={"S3Object": {"Bucket": bucket, "Name": key}})
    label_entry = {}
    # Presumably extracts label "Name" values with confidence >= 50 --
    # confirm against getFaceAttribute's implementation.
    label_entry["Labels"] = getFaceAttribute(response, attribute="Labels", is_array_type=True,
                                             value_key="Name", confidence_level=50)
    label_entry["ExternalImageId"] = os.path.basename(key)
    print(label_entry)
    table = boto3.resource('dynamodb').Table(ddb_table_name)
    #labels = [{'Confidence': Decimal(str(label_prediction['Confidence'])), 'Name': label_prediction['Name']} for label_prediction in response['Labels']]
    current_time = datetime.utcnow()
    current_time_str = current_time.strftime("%Y-%m-%d %H:%M:%S.%f")
    # NOTE(review): "%s" (epoch seconds) is a platform-specific strftime
    # directive (not portable to Windows) -- confirm deployment target.
    current_time_epoch = current_time.strftime("%s")
    print(current_time_epoch)
    # Splice the microsecond fraction onto the epoch seconds so the stored
    # number has sub-second resolution, e.g. "1589000000.123456".
    (dt, micro) = current_time_str.split('.')
    dt = "%s.%s" % (current_time_epoch, micro)
    print(dt, micro)
    # DynamoDB numeric attributes must be Decimal, not float.
    current_time_epoch = Decimal(dt)
    print(current_time_epoch)
    ddb_item = {}
    ddb_item['external_image_id'] = key
    ddb_item['epoch'] = current_time_epoch
    ddb_item['current_time'] = current_time_str
    ddb_item['Labels'] = label_entry["Labels"]
    ddb_item["rec_type"] = "image_labels"
    # Tag the record with a profile only when one was supplied.
    if profile_name:
        ddb_item['profile'] = profile_name
    print(ddb_item)
    table.put_item(Item=ddb_item)
    return label_entry
Example 13
Source File: vision.py From cloud-vision with Apache License 2.0 | 5 votes |
def detect_labels(self, images, max_results=2, num_retries=3):
    """Uses the Vision API to detect labels in the given images.

    (Docstring fixed: this method requests LABEL_DETECTION, not text
    detection.)

    Args:
        images: iterable of raw image bytes, one entry per image.
        max_results: maximum number of labels to request per image.
        num_retries: number of times the HTTP request is retried.

    Returns:
        A list with one entry per input image; each entry is the list of
        detected label descriptions (empty when no labels were found).
    """
    batch_request = [{
        'image': {
            # The JSON API requires base64 text, not raw bytes.
            'content': base64.b64encode(image).decode('UTF-8')
        },
        'features': [{
            'type': 'LABEL_DETECTION',
            'maxResults': max_results,
        }]
    } for image in images]

    request = self.vision.images().annotate(
        body={'requests': batch_request})
    response = request.execute(num_retries=num_retries)

    # 'labelAnnotations' is absent for images with no detected labels.
    return [[x['description'] for x in r.get('labelAnnotations', [])]
            for r in response['responses']]
Example 14
Source File: beta_snippets.py From python-docs-samples with Apache License 2.0 | 4 votes |
def detect_labels_streaming(path):
    # [START video_streaming_label_detection_beta]
    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # path = 'path_to_file'

    client = videointelligence.StreamingVideoIntelligenceServiceClient()

    # The streaming config must be the first request in the stream.
    streaming_config = videointelligence.types.StreamingVideoConfig(
        feature=(videointelligence.enums.StreamingFeature.STREAMING_LABEL_DETECTION)
    )
    first_request = videointelligence.types.StreamingAnnotateVideoRequest(
        video_config=streaming_config
    )

    # Read the whole file in 5MB chunks (recommended less than 10MB).
    chunk_size = 5 * 1024 * 1024
    chunks = []
    with io.open(path, "rb") as video_file:
        for piece in iter(lambda: video_file.read(chunk_size), b""):
            chunks.append(piece)

    def request_iterator():
        yield first_request
        for piece in chunks:
            yield videointelligence.types.StreamingAnnotateVideoRequest(
                input_content=piece
            )

    # streaming_annotate_video returns a generator; each response covers
    # about 1 second of video. The default timeout is about 300 seconds,
    # so for longer videos set it above the stream length in seconds.
    responses = client.streaming_annotate_video(request_iterator(), timeout=600)

    for response in responses:
        # Stop on the first API-level error.
        if response.error.message:
            print(response.error.message)
            break

        annotations = response.annotation_results.label_annotations
        # annotations could be empty.
        if not annotations:
            continue

        for annotation in annotations:
            # Each annotation has one frame, which carries a time offset.
            first_frame = annotation.frames[0]
            offset = first_frame.time_offset.seconds + first_frame.time_offset.nanos / 1e9
            # description is in Unicode
            print(
                u"{}s: {} (confidence: {})".format(
                    offset, annotation.entity.description, first_frame.confidence
                )
            )
    # [END video_streaming_label_detection_beta]