Python cv2.imencode() Examples

The following are 30 code examples of cv2.imencode(), collected from open-source projects. The originating project and source file are noted above each example. cv2.imencode() compresses an in-memory image into a target format such as JPEG or PNG and returns a (success, buffer) pair, where the buffer is a one-dimensional NumPy array holding the encoded bytes.
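For orientation, here is a minimal round-trip sketch; the file name and variable names are illustrative, not taken from any project below:

import cv2
import numpy as np

img = cv2.imread('input.jpg')               # hypothetical path; a BGR uint8 array
ok, buf = cv2.imencode('.png', img)         # encode to PNG in memory
if not ok:
    raise RuntimeError('encoding failed')
data = buf.tobytes()                        # raw encoded bytes, e.g. for a network payload
restored = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)

Note that OpenCV images are BGR-ordered NumPy arrays, which is why several examples below convert to or from RGB around the imencode() call.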
Example #1
Source File: rl_data.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tobytes() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except OSError:
        print('unable to open port 8889')
Example #2
Source File: runner.py    From MobileNetV2-PoseEstimation with MIT License
def infer(image, model='cmu', resize='0x0', resize_out_ratio=4.0):
    """

    :param image:
    :param model:
    :param resize:
    :param resize_out_ratio:
    :return: coco_style_keypoints array
    """
    w, h = model_wh(resize)
    e = get_estimator(model, resize)

    # estimate human poses from a single image
    path = image
    image = common.read_imgfile(path, None, None)
    if image is None:
        raise Exception('Image cannot be read, path=%s' % path)
    humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=resize_out_ratio)
    image_h, image_w = image.shape[:2]

    if "TERM_PROGRAM" in os.environ and 'iTerm' in os.environ["TERM_PROGRAM"]:
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        image_str = cv2.imencode(".jpg", image)[1].tobytes()
        print("\033]1337;File=name=;inline=1:" + base64.b64encode(image_str).decode("utf-8") + "\a")

    return [(eval.write_coco_json(human, image_w, image_h), human.score) for human in humans] 
Example #3
Source File: opencv_adapter.py    From gabriel with Apache License 2.0
def get_producer_wrappers(self):
        async def producer():
            _, frame = self._video_capture.read()
            if frame is None:
                return None

            frame = self._preprocess(frame)
            _, jpeg_frame = cv2.imencode('.jpg', frame)

            input_frame = gabriel_pb2.InputFrame()
            input_frame.payload_type = gabriel_pb2.PayloadType.IMAGE
            input_frame.payloads.append(jpeg_frame.tobytes())

            extras = self._produce_extras()
            if extras is not None:
                input_frame.extras.Pack(extras)

            return input_frame

        return [
            ProducerWrapper(producer=producer, source_name=self._source_name)
        ] 
Example #4
Source File: rl_data.py    From training_results_v0.6 with Apache License 2.0
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tobytes() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except OSError:
        print('unable to open port 8889')
Example #5
Source File: image_recognition.py    From ROS-Programming-Building-Powerful-Robots with MIT License
def callback(self, image_msg):
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        # copy from
        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/imagenet/classify_image.py
        image_data = cv2.imencode('.jpg', cv_image)[1].tobytes()
        # Creates graph from saved GraphDef.
        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
        predictions = self._session.run(
            softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        # Creates node ID --> English string lookup.
        node_lookup = classify_image.NodeLookup()
        top_k = predictions.argsort()[-self.use_top_k:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            if score > self.score_threshold:
                rospy.loginfo('%s (score = %.5f)' % (human_string, score))
                self._pub.publish(human_string) 
Example #6
Source File: app.py    From Gather-Deployment with MIT License
def gen_livestream():
    global last_frame
    while True:
        if app.queue.qsize():
            frame = base64.b64decode(app.queue.get().split('base64')[-1])
            last_frame = frame
        else:
            if last_frame is None:
                fh = open(d+"/static/black.jpg", "rb")
                frame = fh.read()
                fh.close()
            else:
                frame = last_frame
        if last_frame:
            img_np = np.array(Image.open(io.BytesIO(frame)))
            img_np = detect_object(img_np)
            frame = cv2.imencode('.jpg', cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB))[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') 
Example #7
Source File: opencv.py    From Gather-Deployment with MIT License
def get_frame(self):
        _, frame = self.video.read()
        img = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2RGB)
        image_np_expanded = np.expand_dims(img, axis = 0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        (boxes, scores, classes, num_detections) = sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict = {image_tensor: image_np_expanded},
        )
        vis_util.visualize_boxes_and_labels_on_image_array(
            frame,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates = True,
            line_thickness = 8,
        )
        # encode the annotated frame as JPEG; cv2.imencode expects BGR input
        return cv2.imencode('.jpg', frame)[1].tobytes()
Example #8
Source File: landmark_augment.py    From face_landmark_dnn with MIT License
def mini_crop_by_landmarks(self, sample_list, pad_rate, img_format):
        """
        Crop full image to mini. Only keep valid image to save
        Args:
            sample_list: (image, landmarks)
            pad_rate: up scale rate
            img_format: "RGB" or "BGR"
        Return:
            new sample list
        Raises:
            No
        """
        new_sample_list = []
        for sample in sample_list:
            image = cv2.imread(sample[0])
            if img_format == 'RGB':
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            landmarks = sample[1]
            (x1, y1, x2, y2), _, _, _ = self.get_bbox_of_landmarks(image, landmarks, pad_rate, 0.5)
            new_sample_list.append(
                (cv2.imencode(".jpg", image[y1:y2, x1:x2])[1], landmarks - (x1, y1))
            )
        return new_sample_list
Example #9
Source File: ros_tensorflow_classify.py    From ros_tensorflow with Apache License 2.0
def callback(self, image_msg):
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        image_data = cv2.imencode('.jpg', cv_image)[1].tobytes()

        # Creates graph from saved GraphDef.
        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
        predictions = self._session.run(
            softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)

        # Creates node ID --> English string lookup.
        node_lookup = self.load(PATH_TO_LABELS, PATH_TO_UID)
        top_k = predictions.argsort()[-self.use_top_k:][::-1]
        for node_id in top_k:
            
            if node_id not in node_lookup:
                human_string = ''
            else:
                human_string = node_lookup[node_id]

            score = predictions[node_id]
            if score > self.score_threshold:
                rospy.loginfo('%s (score = %.5f)' % (human_string, score))
                self._pub.publish(human_string) 
Example #10
Source File: full_model_pack.py    From rec-attend-public with MIT License
def write_log(self, results):
    """Process results
    Args:
      results: y_out, s_out
    """
    inp = results['_batches'][0]
    y_out = results['y_out']
    s_out = results['s_out']
    with h5py.File(self.dataset.h5_fname, 'r+') as h5f:
      print(inp['idx_map'])
      for ii in range(y_out.shape[0]):
        idx = inp['idx_map'][ii]
        group = h5f[self.dataset.get_str_id(idx)]
        if 'instance_pred' in group:
          del group['instance_pred']
        for ins in range(y_out.shape[1]):
          y_out_arr = y_out[ii, ins]
          y_out_arr = (y_out_arr * 255).astype('uint8')
          y_out_str = cv2.imencode('.png', y_out_arr)[1]
          group['instance_pred/{:02d}'.format(ins)] = y_out_str
        if 'score_pred' in group:
          del group['score_pred']
        group['score_pred'] = s_out[ii] 
Example #11
Source File: client.py    From IkaLog with Apache License 2.0
def recoginize_deadly_weapons(self, deadly_weapons_list):
        if len(deadly_weapons_list) == 0:
            return None
        images = self.pack_deadly_weapons_image(deadly_weapons_list)

        payload = {
            'game_language': Localization.get_game_languages()[0],
            'sample_height': deadly_weapons_list[0].shape[0],
            'sample_width': deadly_weapons_list[0].shape[1],
            'samples': cv2.imencode('.png', images)[1].tobytes()
        }

        response = self._request_func(
            '/api/v1/recoginizer/deadly_weapon',
            payload,
        )

        if response.get('status', None) != 'ok':
            return {'status': 'error'}

        return response 
Example #12
Source File: client.py    From IkaLog with Apache License 2.0
def recoginize_abilities(self, abilities_list):
        payload = []

        for img_ability in abilities_list:
            result, img_ability_png = cv2.imencode('.png', img_ability)
            payload.append(img_ability_png.tobytes())

        response = self._request_func(
            '/api/v1/recoginizer/ability',
            payload,
        )

        # Validate the response.
        assert response is not None, 'No API response.'
        assert response.get('status', None) == 'ok', 'API error.'

        # Decode the response.
        ret = []
        for entry in response['abilities']:
            ret.append(entry.get('ability', None))

        return ret 
Example #13
Source File: camera_opencv.py    From object-detection with MIT License
def img_to_base64(self, img):
        """encode as a jpeg image and return it"""
        buffer = cv2.imencode('.jpg', img)[1].tobytes()
        jpg_as_text = base64.b64encode(buffer)
        base64_string = jpg_as_text.decode('utf-8')
        return base64_string 
Example #14
Source File: client.py    From IkaLog with Apache License 2.0
def recoginize_weapons(self, weapons_list):
        payload = []

        for img_weapon in weapons_list:
            result, img_weapon_png = cv2.imencode('.png', img_weapon)
            payload.append(img_weapon_png.tobytes())

        response = self._request_func(
            '/api/v1/recoginizer/weapon',
            payload,
        )

        # Validate the response.
        assert response is not None, 'No API response.'
        assert response.get('status', None) == 'ok', 'API error.'

        # Decode the response.
        ret = []
        for entry in response['weapons']:
            ret.append(entry.get('weapon', None))

        return ret 
Example #15
Source File: noise.py    From dataflow with Apache License 2.0
def _augment(self, img, q):
        # JPEG-noise augmentation: re-encode at quality q, then decode back
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1).astype(img.dtype)
Example #16
Source File: noise.py    From VDAIC2017 with MIT License
def _augment(self, img, q):
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1) 
Example #17
Source File: facematch.py    From xi-iot with MIT License
def main(ctx,msg):
    logging.info("***** Face Match script Start *****")
    msg = json.loads(msg)
    data = msg['data']
    image = Image.open(io.BytesIO(base64.b64decode(data))).convert('RGB')
    cvImage = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    faces = msg['faces']
    # Return early if no face was found.
    if len(faces) == 0:
        logging.info("No face found")
        logging.info("***** Face match script End *****")
        return
    for i in range(len(faces)):
        known_face = facematch.match(np.asarray(faces[i]['embedding']))
        bb = faces[i]['bb']
        if known_face is None:
            faces[i]['knownface'] = False
            cv2.rectangle(cvImage, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 2)
        else:
            faces[i]['knownface'] = True
            faces[i]['name'] = known_face['name']
            faces[i]['designation'] = known_face['designation']
            faces[i]['department'] = known_face['department']
            faces[i]['employee_id'] = known_face['employee_id']
            logging.info("Found matching face with employee id: %s",known_face['employee_id'])
            cv2.rectangle(cvImage, (bb[0], bb[1]), (bb[2], bb[3]), (0, 255, 0), 2)
    response = {}
    if len(faces) != 0:
        # encode image as jpeg
        _, img_encoded = cv2.imencode('.jpg', cvImage)
        encodedStr = base64.b64encode(img_encoded).decode('utf-8')
        response['image'] = encodedStr
        response['faces'] = faces
        logging.info("Idenitfied %d faces",len(faces))
    else:
        response['image'] = data
        response['faces'] = faces
    ctx.send(json.dumps(response))
    logging.info("***** Face match script End *****")
    return 
Example #18
Source File: ImageMiniLab.py    From ImageMiniLab with GNU General Public License v3.0
def decode_and_show_dst(self, dst):
        ret, img_buf = cv.imencode(".jpg", dst)
        # print(ret, img_buf)
        if ret is True:
            ret = self.dst_pix.loadFromData(img_buf)
            if ret is True:
                self.show_exp_pix()

    # grayscale conversion
Example #19
Source File: ucf101_dataset.py    From video_prediction with MIT License
def read_videos_and_save_tf_records(output_dir, fnames, start_sequence_iter=None,
                                    end_sequence_iter=None, sequences_per_file=128):
    print('started process with PID:', os.getpid())

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if start_sequence_iter is None:
        start_sequence_iter = 0
    if end_sequence_iter is None:
        end_sequence_iter = len(fnames)

    def preprocess_image(image):
        if image.shape != (240, 320, 3):
            image = cv2.resize(image, (320, 240), interpolation=cv2.INTER_LINEAR)
        return tf.compat.as_bytes(cv2.imencode(".jpg", image)[1].tobytes())

    print('reading and saving sequences {0} to {1}'.format(start_sequence_iter, end_sequence_iter))

    sequences = []
    for sequence_iter in range(start_sequence_iter, end_sequence_iter):
        if not sequences:
            last_start_sequence_iter = sequence_iter
            print("reading sequences starting at sequence %d" % sequence_iter)

        sequences.append(read_video(fnames[sequence_iter]))

        if len(sequences) == sequences_per_file or sequence_iter == (end_sequence_iter - 1):
            output_fname = 'sequence_{0}_to_{1}.tfrecords'.format(last_start_sequence_iter, sequence_iter)
            output_fname = os.path.join(output_dir, output_fname)
            save_tf_record(output_fname, sequences, preprocess_image)
            sequences[:] = [] 
Example #20
Source File: lambda_extract_frame_video.py    From aws-tutorial-code with MIT License
def lambda_handler(event, context):
    s3_resource = boto3.resource("s3")
    s3_client = boto3.client("s3")
    try:
        file_obj = event["Records"][0]
        # extract bucket name from event data on trigger
        bucket_name = str(file_obj['s3']['bucket']['name'])
        # extract file name from event data on trigger
        file_name = str(file_obj['s3']['object']['key'])
        print(f"Bucket Name: {bucket_name}\nFileName: {file_name}")

        # temporary path to save video file
        tmp_file_path = "/tmp/{}".format(file_name)
        print(f"Temporary Path: {tmp_file_path}")

        # downloading file to the tmp path
        s3_resource.meta.client.download_file(bucket_name, file_name, tmp_file_path)

        # loading video source
        cap = cv2.VideoCapture(tmp_file_path)
        # initializing frame count
        frameCount = 0
        # deriving framerate
        frameRate = math.floor(cap.get(cv2.CAP_PROP_FPS))

        while cap.isOpened():
            # extract a frame
            ret, frame = cap.read()
            frameCount += 1
            if not ret:
                break
            # capture a frame every 10 seconds
            if frameCount % (10 * frameRate) == 0:
                # convert the encoded frame (a NumPy array) to bytes
                res, im_jpg = cv2.imencode('.jpg', frame)
                # save the frame to S3
                s3_client.put_object(Bucket="bucket-name", Key="{}.jpg".format(uuid.uuid4()), Body=im_jpg.tobytes())

    except Exception as e:
        print("Unable to extract frames : {}".format(e))
        return "Unable to extract frames" 
Example #21
Source File: example_correlation_with_globals.py    From xi-iot with MIT License
def main(ctx,msg):
    global image_payload
    global label_payload
    
    if ctx.get_topic() == "images":
        '''
        Use get_topic to determine the MQTT topic.
        If we receive an image message then store the payload in image_payload global.
        '''
        logging.info("***** Image message received *****")
        logging.info("***** Unpacking message *****")
        unpacked_dict = msgpack.unpackb(msg, raw=True)
        image = numpy.frombuffer(unpacked_dict["Data"], dtype=unpacked_dict["DataType"])
        image = image.reshape((unpacked_dict["Height"],unpacked_dict["Width"],unpacked_dict["Channels"]))
        _, img_encoded = cv2.imencode('.jpg', image)
        encodedStr = base64.b64encode(img_encoded).decode('utf-8')
        logging.info("***** Storing unpacked frame payload in memory *****")
        payload = {}
        payload['timestamp'] = ctx.get_timestamp()
        payload['image'] = encodedStr
        image_payload = payload
        return
    elif ctx.get_topic() == "labels":
        logging.info("***** Label message received *****")
        if image_payload == "init":
            logging.info("***** Label message received, but no image payload in memory *****")
        else:
            #If we receive a label and have image_payload then combine them.
            logging.info("***** Retrieving unpacked frame payload from memory *****")
            payload = {}
            payload['image_timestamp'] = image_payload['timestamp']
            payload['image'] = image_payload['image']
            logging.info("***** Adding label payload to image payload *****")
            payload['label_timestamp'] = ctx.get_timestamp()
            payload['label'] = msg
            #Return the combined payload as a single JSON.
            return ctx.send(json.dumps(payload))
    else:
        return 
Example #22
Source File: pickle_provider.py    From lffd-pytorch with MIT License
def write(self):

        for data_item in self.data_adapter.get_one():

            temp_sample = []
            im, bboxes = data_item
            ret, buf = cv2.imencode(self.compression_mode, im, self.encode_params)
            if not ret:
                print('An error occurred during encoding.')
                continue
            if buf is None or buf.size == 0:
                print('The encoded buffer is empty.')
                continue
            temp_sample.append(buf)

            if isinstance(bboxes, str):  # negative sample
                temp_sample.append(0)
                temp_sample.append(int(bboxes))
            else:
                temp_sample.append(1)
                temp_sample.append(bboxes)

            self.data[self.counter] = temp_sample
            print('Successfully save the %d-th data item.' % self.counter)
            self.counter += 1

        pickle.dump(self.data, open(self.pickle_file_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL) 
Example #23
Source File: file_utils.py    From yolo_v2 with Apache License 2.0
def write_image(image_path, rgb):
  ext = os.path.splitext(image_path)[1]
  with gfile.GFile(image_path, 'wb') as f:
    img_str = cv2.imencode(ext, rgb[:, :, ::-1])[1].tobytes()  # RGB -> BGR for OpenCV
    f.write(img_str)
Example #24
Source File: reprocess_screenshot_client.py    From IkaLog with Apache License 2.0
def scoreboard_recognition(self, img_result):
        result, img_result_png = cv2.imencode('.png', img_result)
        payload = {
            'image_result': img_result_png.tobytes(),
        }

        response = self._request_func(
            '/api/v1/scoreboard_recognition',
            payload,
        )

#        if response.get('status', None) != 'ok':
#            return {'status': 'error'}

        return response 
Example #25
Source File: util.py    From FET-GAN with MIT License
def imshow_jupyter(imgtensor):
    tensor_numpy = imgtensor.cpu().float().numpy()  # convert it into a numpy array
    tensor_numpy = (np.transpose(tensor_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling [-1,1]->[0,255]
    tensor_numpy = tensor_numpy.astype(np.uint8)
    import cv2
    import IPython
    _, buf = cv2.imencode('.jpg', tensor_numpy)
    i = IPython.display.Image(data=buf.tobytes())
    IPython.display.display(i)
Example #26
Source File: statink.py    From IkaLog with Apache License 2.0
def encode_image(self, img):
        result, img_png = cv2.imencode('.png', img)

        if not result:
            IkaUtils.dprint('%s: Failed to encode the image (%s)' %
                            (self, img.shape))
            return None

        s = img_png.tobytes()

        IkaUtils.dprint('%s: Encoded screenshot (%dx%d %d bytes)' %
                        (self, img.shape[1], img.shape[0], len(s)))

        return s 
Example #27
Source File: create_tfrecord.py    From document-ocr with Apache License 2.0
def create_tf_record(data_dir, tfrecords_path):
  image_names = []
  for root, dirs, files in os.walk(data_dir):
    image_names +=[os.path.join(root, name) for name in files]
  random.shuffle(image_names)
  writer = tf.python_io.TFRecordWriter(tfrecords_path)
  print("handle image : %d"%(len(image_names)))
  i = 0
  for image_name in image_names:
    if i % 10000 == 0:
      print(i, len(image_names))
    i+=1
    im = cv2.imread(image_name, cv2.IMREAD_GRAYSCALE)
    try:
      is_success, image_buffer = cv2.imencode('.png', im)
    except Exception as e:
      continue
    if not is_success:
      continue
    label = int(image_name.split("/")[-2])
    features = tf.train.Features(feature={
         'labels': _int64_feature(label),
          'images': _bytes_feature(image_buffer.tobytes()),
          'imagenames': _bytes_feature(image_name.encode("utf-8"))})
    example = tf.train.Example(features=features)
    writer.write(example.SerializeToString())
  writer.close() 
Example #28
Source File: file_utils.py    From Gun-Detector with Apache License 2.0
def write_image(image_path, rgb):
  ext = os.path.splitext(image_path)[1]
  with gfile.GFile(image_path, 'wb') as f:
    img_str = cv2.imencode(ext, rgb[:, :, ::-1])[1].tobytes()  # RGB -> BGR for OpenCV
    f.write(img_str)
Example #29
Source File: image_functions.py    From niryo_one_ros with GNU General Public License v3.0
def compress_image(img, quality=90):
    """
    Compress OpenCV image
    :param img: OpenCV Image
    :param quality: integer between 1 and 100. The higher it is, the less information will be lost,
    but the heavier the compressed image will be
    :return: string representing compressed image
    """
    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
    if not result:
        return False, None

    return True, encimg.tobytes()
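A hypothetical call, assuming img is a BGR NumPy array from cv2.imread() or a camera; the output file name is illustrative:

ok, data = compress_image(img, quality=50)  # lower quality, smaller payload
if ok:
    with open('frame.jpg', 'wb') as f:      # or publish the bytes over a ROS topic
        f.write(data)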
Example #30
Source File: twitter.py    From IkaLog with Apache License 2.0
def post_media(self, img):
        result, img_png = cv2.imencode('.png', img)

        if not result:
            IkaUtils.dprint('%s: Failed to encode the image (%s)' %
                            (self, img.shape))
            return None

        files = { "media": img_png.tostring() }

        CK = self._preset_ck if self.consumer_key_type == 'ikalog' else self.consumer_key
        CS = self._preset_cs if self.consumer_key_type == 'ikalog' else self.consumer_secret

        from requests_oauthlib import OAuth1Session
        twitter = OAuth1Session(
            CK, CS, self.access_token, self.access_token_secret
        )
        req = twitter.post(
            self.url_media,
            files=files,
            verify=self._get_cert_path()
        )

        if req.status_code == 200:
            return json.loads(req.text)['media_id']

        IkaUtils.dprint('%s: Failed to post media.' % self)
        return None
