
3D shoe detection using MediaPipe

천숭이 2022. 2. 24. 22:47
# -*- coding: utf-8 -*-
"""mediapipe_objectron.ipynb
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1V7UxCaOuyeUWL2kwEc1RqgJrbtutzfPI
"""

import cv2
import numpy as np

import mediapipe as mp
mp_objectron = mp.solutions.objectron
mp_drawing = mp.solutions.drawing_utils

"""Mediapipe Objectron provides pre-trained models for shoe, chair, cup and camera.
***
#Objectron Shoe Model
Upload any image that has visible shoes to the Colab. We take two example images from the web: https://unsplash.com/photos/8dukMg99Hd8 and https://unsplash.com/photos/PqbL_mxmaUE
"""

import glob

# Read images with OpenCV.
shoe_images = {name: cv2.imread(name) for name in glob.glob("shoe.jpg")}

print(len(shoe_images))

# print(shoe_images)

for name, image in shoe_images.items():
  print(name)
  # Preview the image; press any key to move on.
  cv2.imshow("image", image)
  cv2.waitKey()

with mp_objectron.Objectron(
    static_image_mode=True,
    max_num_objects=5,
    min_detection_confidence=0.5,
    model_name='Shoe') as objectron:
  # Run inference on shoe images.

  for name, image in shoe_images.items():
    # Convert the BGR image to RGB and process it with MediaPipe Objectron.
    results = objectron.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    # Draw box landmarks.
    if not results.detected_objects:
      print(f'No box landmarks detected on {name}')
      continue
    print(f'Box landmarks of {name}:')
    annotated_image = image.copy()
    for detected_object in results.detected_objects:
      mp_drawing.draw_landmarks(
          annotated_image, detected_object.landmarks_2d, mp_objectron.BOX_CONNECTIONS)
      mp_drawing.draw_axis(annotated_image, detected_object.rotation, detected_object.translation)

    annotated_image = cv2.resize(annotated_image, dsize=(800, 800))
    cv2.imshow("test", annotated_image)
    print("detected_object : ", detected_object.rotation, detected_object.translation)
    cv2.waitKey()
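
"""The pose printed above is, per the MediaPipe documentation, a 3x3 rotation matrix and a 3D translation vector from the object frame to the camera frame. As a rough sketch (not part of the original notebook), the rotation matrix can be collapsed into an axis-angle vector with cv2.Rodrigues, which is easier to eyeball than nine matrix entries. This assumes the shoe loop above left `detected_object` bound to its last detection."""

# Sketch: summarize the pose of the last detection from the loop above.
rotation = np.asarray(detected_object.rotation, dtype=np.float64)        # 3x3 rotation matrix
translation = np.asarray(detected_object.translation, dtype=np.float64)  # 3D translation vector

rvec, _ = cv2.Rodrigues(rotation)  # axis-angle representation, shape (3, 1)
print("rotation angle (deg):", np.degrees(np.linalg.norm(rvec)))
print("translation:", translation)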

"""***
#Objectron Chair Model
Upload any image that has chairs to the Colab. We take one example image from the web: https://unsplash.com/photos/7T8vSHYXq4U
"""

import glob

# Read images with OpenCV.
chair_images = {name: cv2.imread(name) for name in glob.glob("chair/*.png")}

# Preview the images.
for name, image in chair_images.items():
  print(name)
  cv2.imshow("test", image)
  cv2.waitKey()

with mp_objectron.Objectron(
    static_image_mode=True,
    max_num_objects=5,
    min_detection_confidence=0.5,
    model_name='Chair') as objectron:
  # Run inference on chair images.

  for name, image in chair_images.items():
    # Convert the BGR image to RGB and process it with MediaPipe Objectron.
    results = objectron.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    # Draw box landmarks.
    if not results.detected_objects:
      print(f'No box landmarks detected on {name}')
      continue
    print(f'Box landmarks of {name}:')
    annotated_image = image.copy()
    for detected_object in results.detected_objects:
      mp_drawing.draw_landmarks(
          annotated_image, detected_object.landmarks_2d, mp_objectron.BOX_CONNECTIONS)
      mp_drawing.draw_axis(annotated_image, detected_object.rotation, detected_object.translation)
    annotated_image = cv2.resize(annotated_image, dsize=(800, 800))
    cv2.imshow("test", annotated_image)
    cv2.waitKey()

"""***
#Objectron Cup Model
Upload any image that that has cups to the Colab. We take one example image from the web: https://unsplash.com/photos/WJ7gZ3cilBA
# """

- model_name accepts the object classes provided by the library, e.g. 'Cup', 'Chair', and so on, but only the four pre-trained classes mentioned above ('Shoe', 'Chair', 'Cup', 'Camera') are available, which is a very small set; see the sketch below.
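
A small sketch of how one might guard the model choice; the four names come from the MediaPipe Objectron documentation, while the make_objectron helper itself is just an illustration.

# The MediaPipe Objectron solution currently ships these pre-trained models.
SUPPORTED_MODELS = ('Shoe', 'Chair', 'Cup', 'Camera')

# Hypothetical helper: validate the class name before building the detector.
def make_objectron(model_name='Shoe'):
  if model_name not in SUPPORTED_MODELS:
    raise ValueError(f'Unsupported model_name: {model_name}')
  return mp_objectron.Objectron(
      static_image_mode=True,
      max_num_objects=5,
      min_detection_confidence=0.5,
      model_name=model_name)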
