Output the file with the extracted class text as speech
Leedong414 committed May 28, 2024
1 parent 712de55 commit 4e32dd2
Showing 1 changed file with 355 additions and 0 deletions.
355 changes: 355 additions & 0 deletions Untitled1.ipynb
@@ -0,0 +1,355 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"authorship_tag": "ABX9TyNnTQLE9Hln//EdaNSeva+y",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/Leedong414/yolov5/blob/master/Untitled1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "8pmMHhGEOE2B"
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "NBJOrKoMOGT5",
"outputId": "f1b29278-57c4-4886-93a5-ecf01612288e"
},
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Mounted at /content/drive\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"pip install gtts"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"collapsed": true,
"id": "BeEnl52JOKNY",
"outputId": "7b73f30b-f650-45ef-af5f-4b5f7c85eff3"
},
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Collecting gtts\n",
" Downloading gTTS-2.5.1-py3-none-any.whl (29 kB)\n",
"Requirement already satisfied: requests<3,>=2.27 in /usr/local/lib/python3.10/dist-packages (from gtts) (2.31.0)\n",
"Requirement already satisfied: click<8.2,>=7.1 in /usr/local/lib/python3.10/dist-packages (from gtts) (8.1.7)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->gtts) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->gtts) (3.7)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->gtts) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.27->gtts) (2024.2.2)\n",
"Installing collected packages: gtts\n",
"Successfully installed gtts-2.5.1\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"from gtts import gTTS\n",
"\n",
"def speak(text):\n",
"\ttts = gTTS(text=text, lang='ko')\n",
"\ttts.save('voice.mp3')\n",
"\n",
"speak(\"안녕하세요, 저는 IML이에요.\")"
],
"metadata": {
"id": "VnDDOpZjOYGe"
},
"execution_count": 4,
"outputs": []
},
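{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above saves the synthesized speech to `voice.mp3` but never plays it. The next cell is a minimal sketch for listening to the result inside Colab; it assumes `voice.mp3` sits in the current working directory and uses `IPython.display.Audio` to render an inline player."
]
},
{
"cell_type": "code",
"source": [
"# Minimal sketch: play the MP3 produced by speak() inline in the notebook.\n",
"# Assumes voice.mp3 exists in the current working directory.\n",
"from IPython.display import Audio\n",
"\n",
"Audio('voice.mp3')"
],
"metadata": {},
"execution_count": null,
"outputs": []
},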
{
"cell_type": "code",
"source": [
"tts = gTTS(text=text, lang='ko')\n",
"mp3_fp = BytesIO()\n",
"tts.write_to_fp(mp3_fp)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 184
},
"id": "jLuBkQBZO5cv",
"outputId": "3de9b61b-8168-4272-c409-5d5b6eca5c7d"
},
"execution_count": 5,
"outputs": [
{
"output_type": "error",
"ename": "NameError",
"evalue": "name 'text' is not defined",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-ace0f3904d56>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgTTS\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtext\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlang\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'ko'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mmp3_fp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mBytesIO\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mtts\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite_to_fp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmp3_fp\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'text' is not defined"
]
}
]
},
{
"cell_type": "code",
"source": [
"%cd /content/drive/MyDrive/yolov5"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "BJw9zD_OUeYI",
"outputId": "f8e4baf3-9809-4504-d5db-2f0a487910d6"
},
"execution_count": 16,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"/content/drive/MyDrive/yolov5\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"!python detect.py --weights /content/drive/MyDrive/yolov5/runs/train/Project_yolo5/weights/best.pt --conf 0.5 --source /content/drive/MyDrive/13.jpg\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "44W31K33V2nK",
"outputId": "c38dd175-49ca-4622-b830-7603bd7a9b38"
},
"execution_count": 20,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Using cache found in /root/.cache/torch/hub/ultralytics_yolov5_master\n",
"YOLOv5 🚀 2024-5-28 Python-3.10.12 torch-2.3.0+cu121 CPU\n",
"\n",
"Fusing layers... \n",
"YOLOv5s summary: 157 layers, 7037095 parameters, 0 gradients, 15.8 GFLOPs\n",
"Adding AutoShape... \n",
"Detected Classes:\n",
"Hardhat\n",
"NO-Mask\n",
"NO-Mask\n",
"Hardhat\n",
"Safety Vest\n",
"sh: 1: mpg321: not found\n",
"\u001b[34m\u001b[1mdetect: \u001b[0mweights=['/content/drive/MyDrive/yolov5/runs/train/Project_yolo5/weights/best.pt'], source=/content/drive/MyDrive/13.jpg, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.5, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_csv=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n",
"YOLOv5 🚀 2024-5-28 Python-3.10.12 torch-2.3.0+cu121 CPU\n",
"\n",
"Fusing layers... \n",
"YOLOv5s summary: 157 layers, 7037095 parameters, 0 gradients, 15.8 GFLOPs\n",
"image 1/1 /content/drive/MyDrive/13.jpg: 608x640 2 Hardhats, 2 NO-Masks, 1 Safety Vest, 674.3ms\n",
"Speed: 9.7ms pre-process, 674.3ms inference, 1.1ms NMS per image at shape (1, 3, 640, 640)\n",
"Results saved to \u001b[1mruns/detect/exp3\u001b[0m\n"
]
}
]
},
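{
"cell_type": "markdown",
"metadata": {},
"source": [
"The run above prints the detected class names, but the audio step fails because `mpg321` is not installed in this Colab runtime (`sh: 1: mpg321: not found`). The next cell is only a sketch of one way to voice those labels with the already-installed gTTS package; the `detected_classes` list is copied by hand from the output above and would normally be produced by `detect.py`."
]
},
{
"cell_type": "code",
"source": [
"# Sketch only: speak the detected class names with gTTS instead of mpg321,\n",
"# which is not installed in this runtime. The list below is copied by hand from\n",
"# the detect.py output above; in practice it would come from detect.py itself.\n",
"from gtts import gTTS\n",
"from IPython.display import Audio\n",
"\n",
"detected_classes = [\"Hardhat\", \"NO-Mask\", \"NO-Mask\", \"Hardhat\", \"Safety Vest\"]\n",
"\n",
"tts = gTTS(text=\", \".join(detected_classes), lang='en')\n",
"tts.save('detected_classes.mp3')\n",
"Audio('detected_classes.mp3')"
],
"metadata": {},
"execution_count": null,
"outputs": []
},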
{
"cell_type": "code",
"source": [
"import pyaudio\n",
"import wave\n",
"import speech_recognition as sr\n",
"\n",
"def audio_save():\n",
"\n",
" FORMAT = pyaudio.paInt16 # 16비트 형식으로 설정\n",
" CHANNELS = 1\n",
" RATE = 44100\n",
" CHUNK = 1024\n",
" RECORD_SECONDS = 10\n",
" OUTPUT_FILENAME = \"recorded_audio.wav\" # 저장할 파일 이름\n",
"\n",
"\n",
" audio = pyaudio.PyAudio()\n",
"\n",
" stream = audio.open(format=FORMAT, channels=CHANNELS,\n",
" rate=RATE, input=True,\n",
" frames_per_buffer=CHUNK)\n",
"\n",
" print(\"녹음 시작...\")\n",
"\n",
" frames = []\n",
" for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n",
" data = stream.read(CHUNK)\n",
" frames.append(data)\n",
"\n",
" print(\"녹음 완료.\")\n",
"\n",
"\n",
" stream.stop_stream()\n",
" stream.close()\n",
" audio.terminate()\n",
"\n",
" # 음성 파일로 저장\n",
" with wave.open(OUTPUT_FILENAME, 'wb') as wf:\n",
" wf.setnchannels(CHANNELS)\n",
" wf.setsampwidth(audio.get_sample_size(FORMAT))\n",
" wf.setframerate(RATE)\n",
" wf.writeframes(b''.join(frames))\n",
"\n",
" return print(\"파일 저장 완료:\", OUTPUT_FILENAME)\n",
"\n",
"def tts_module():\n",
"\n",
" r = sr.Recognizer()\n",
" kr_audio = sr.AudioFile('recorded_audio.wav')\n",
"\n",
" with kr_audio as source:\n",
" audio = r.record(source)\n",
"\n",
" a=r.recognize_google(audio, language='ko-KR')\n",
" print(r.recognize_google(audio, language='ko-KR'))\n",
"\n",
"\n",
"\n",
" return a\n",
"\n",
"if __name__ == '__main__':\n",
" audio_save()\n",
" sentiment_data=tts_module()\n",
" print(\"a\",sentiment_data)"
],
"metadata": {
"id": "-uVK5NOKaiF6"
},
"execution_count": null,
"outputs": []
},
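{
"cell_type": "markdown",
"metadata": {},
"source": [
"PyAudio cannot open a microphone stream on a hosted Colab runtime because the VM has no audio input device, which is presumably why a pre-recorded file from Drive is used further below. The next cell sketches an alternative: uploading a recording from the local machine with Colab's `files.upload()` helper."
]
},
{
"cell_type": "code",
"source": [
"# Sketch: the Colab VM has no microphone device, so upload a recording from the\n",
"# local machine instead. files.upload() opens a file picker and returns a dict\n",
"# mapping each uploaded filename to its bytes.\n",
"from google.colab import files\n",
"\n",
"uploaded = files.upload()\n",
"for name in uploaded:\n",
"    print('Uploaded:', name)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},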
{
"cell_type": "code",
"source": [
"sentiment_data=='내 눈 앞에 뭐가 있어'"
],
"metadata": {
"id": "P9WufmsAbG02"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import speech_recognition as sr\n",
"import subprocess\n",
"\n",
"def tts_module(audio_file):\n",
" r = sr.Recognizer()\n",
"\n",
" with sr.AudioFile(audio_file) as source:\n",
" audio = r.record(source)\n",
"\n",
" try:\n",
" recognized_text = r.recognize_google(audio, language='ko-KR')\n",
" print(\"인식된 텍스트:\", recognized_text)\n",
" return recognized_text\n",
" except sr.UnknownValueError:\n",
" print(\"Google Speech Recognition이 음성을 인식할 수 없습니다.\")\n",
" return \"\"\n",
" except sr.RequestError as e:\n",
" print(f\"Google Speech Recognition 서비스에 요청할 수 없습니다; {e}\")\n",
" return \"\"\n",
"\n",
"if __name__ == '__main__':\n",
" audio_file = '/content/drive/MyDrive/recorded_audio (1).wav' # 이미 녹음된 음성 파일 경로\n",
" recognized_text = tts_module(audio_file)\n",
"\n",
" if recognized_text == \"내 앞에 뭐 있어\":\n",
" print(\"명령어를 인식했습니다. detect.py를 실행합니다.\")\n",
" subprocess.run([\"python\", \"/content/drive/MyDrive/yolov5/detect.py\", \"--weights\", \"/content/drive/MyDrive/yolov5/runs/train/Project_yolo5/weights/best.pt\", \"--conf\", \"0.5\", \"--source\",\"/content/drive/MyDrive/33.jpg\"\n",
"])\n",
"\n",
" else:\n",
" print(\"인식된 명령어가 없습니다.\")"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "KXSMitA_xiJE",
"outputId": "e667394c-2649-446c-c41e-22b915a7bec1"
},
"execution_count": 101,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"인식된 텍스트: 내 앞에 뭐 있어\n",
"명령어를 인식했습니다. detect.py를 실행합니다.\n"
]
}
]
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "K1iWPfmVx1Wp"
},
"execution_count": null,
"outputs": []
}
]
}
