How to create a voice translation bot with Wit.ai
In our globalized world, communication across linguistic boundaries is more essential than ever. In this article, we will explore how to build a voice translation bot with Wit.ai and make communication more inclusive and accessible to everyone.
The code is available on my GitHub.
The first thing to do is install the dependencies; for example, save the list below to a requirements.txt file and run pip install -r requirements.txt.
blinker==1.8.2
cachetools==5.5.0
certifi==2024.8.30
chardet==3.0.4
charset-normalizer==3.4.0
click==8.1.7
colorama==0.4.6
Flask==3.0.3
google-api-core==2.22.0
google-auth==2.36.0
google-cloud-texttospeech==2.21.0
googleapis-common-protos==1.65.0
googletrans==4.0.0rc1
grpcio==1.67.1
grpcio-status==1.67.1
gTTS==2.5.3
h11==0.9.0
h2==3.2.0
hpack==3.0.0
hstspreload==2024.11.1
httpcore==0.9.1
httpx==0.13.3
hyperframe==5.2.0
idna==2.10
itsdangerous==2.2.0
Jinja2==3.1.4
Levenshtein==0.26.1
MarkupSafe==3.0.2
playsound==1.2.2
prompt_toolkit==3.0.48
proto-plus==1.25.0
protobuf==5.28.3
pyasn1==0.6.1
pyasn1_modules==0.4.1
PyAudio==0.2.14
python-Levenshtein==0.26.1
RapidFuzz==3.10.1
requests==2.32.3
rfc3986==1.5.0
rsa==4.9
sniffio==1.3.1
SpeechRecognition==3.11.0
typing_extensions==4.12.2
urllib3==2.2.3
wcwidth==0.2.13
Werkzeug==3.1.2
wit==6.0.1
Text-to-speech playback (convertion_audio_to_text.py)
Despite its name, this module converts the translated text into audio and plays it aloud:
from gtts import gTTS
import playsound
import os

def speak_translation(text, lang):
    tts = gTTS(text=text, lang=lang)
    filename = "translation.mp3"
    tts.save(filename)
    playsound.playsound(filename)
    os.remove(filename)
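For a quick test of this helper on its own, assuming the file is saved as convertion_audio_to_text.py and a working audio output device is available, you can play a short English sentence:

from convertion_audio_to_text import speak_translation

# Speak a short English sentence through the default audio output
speak_translation("Hello, welcome to the voice translation bot.", "en")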
Google Cloud Text-to-Speech
from google.cloud import texttospeech

def synthesize_speech(text, language_code="wo-WO", voice_name="wo-WO-Standard-A", output_file="output.mp3"):
    client = texttospeech.TextToSpeechClient()
    input_text = texttospeech.SynthesisInput(text=text)

    # Configure the voice for Wolof
    voice = texttospeech.VoiceSelectionParams(
        language_code=language_code,
        name=voice_name,
        ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL,
    )

    # Audio parameters
    audio_config = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.MP3
    )

    # Speech synthesis
    response = client.synthesize_speech(
        input=input_text, voice=voice, audio_config=audio_config
    )

    # Save the audio file
    with open(output_file, "wb") as out:
        out.write(response.audio_content)
        print(f"Audio content written to file {output_file}")

# Use this function with your own text
synthesize_speech("Bonjour, je teste la traduction en Wolof.", "wo-WO")
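Note that the Google Cloud client needs service-account credentials before synthesize_speech can run, and voice availability depends on which language codes Google Cloud Text-to-Speech actually supports. A minimal sketch, assuming the helper above is saved as google_tts.py (a hypothetical file name) and that you have downloaded a service-account key:

import os
from google_tts import synthesize_speech  # hypothetical module name for the helper above

# Tell the Google Cloud client where to find the service-account key
# (the file name here is only an example)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "service-account-key.json"

synthesize_speech("Bonjour, je teste la traduction en Wolof.", "wo-WO")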
Translation
from googletrans import Translator

def translate_text(text, target_lang):
    try:
        translator = Translator()
        translation = translator.translate(text, dest=target_lang)
        print(f"Translation: {translation.text}")
        return translation.text
    except Exception as e:
        print(f"Error during translation: {e}")
        return "Translation not available"
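As a quick sanity check, assuming the function above is saved as translation.py (the module name used by the main file), you can translate a sentence directly; googletrans detects the source language automatically:

from translation import translate_text

# Translate a French sentence into Swahili (one of the bot's target languages)
result = translate_text("Bonjour tout le monde", "sw")
print(result)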
Voice detection
import speech_recognition as sr

def record_audio():
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Speak now...")
        audio = recognizer.listen(source)
        try:
            text = recognizer.recognize_google(audio, language="fr-FR")
            print(f"You said: {text}")
            return text
        except sr.UnknownValueError:
            print("Sorry, I did not understand.")
        except sr.RequestError as e:
            print(f"Service error: {e}")
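record_audio returns None whenever nothing usable was captured, so callers should guard against that before passing the text along. A small sketch combining it with the translation helper, assuming the modules are saved as voice_detection.py and translation.py:

from voice_detection import record_audio
from translation import translate_text

text = record_audio()
if text:
    # Only translate when a transcription was actually produced
    print(translate_text(text, "en"))
else:
    print("Nothing was recognized; please try again.")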
Wit.ai parameters:
You must create a Wit.ai app from the Wit.ai dashboard (Wit.ai is owned by Meta, formerly Facebook) to obtain your access token.
import requests

WIT_AI_TOKEN = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'

def send_to_wit(text):
    headers = {'Authorization': f'Bearer {WIT_AI_TOKEN}'}
    # Passing the text through params lets requests URL-encode the query safely
    params = {'v': '20230414', 'q': text}
    response = requests.get('https://api.wit.ai/message', headers=headers, params=params)
    return response.json()
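The JSON returned by the /message endpoint contains the resolved text plus any intents and entities your Wit.ai app detected. A hedged sketch of pulling out the top intent (the field names follow the current Wit.ai response format, and what comes back depends on how your app is trained):

from witai_params import send_to_wit

def top_intent(wit_response):
    # Wit.ai returns intents sorted by confidence; take the first one if any
    intents = wit_response.get('intents', [])
    return intents[0]['name'] if intents else None

result = send_to_wit("Translate hello into Wolof")
print(top_intent(result))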
The main file
from flask import Flask, request, jsonify
from convertion_audio_to_text import speak_translation
from translation import translate_text
from voice_detection import record_audio
from witai_params import send_to_wit
import Levenshtein

app = Flask(__name__)

# Available languages
AVAILABLE_LANGUAGES = {
    "sw": "Swahili",
    "wo": "Wolof",
    "fon": "Fon",
    "en": "English",
    "fr": "French"
}

def calculate_score(reference_text, user_text):
    similarity = Levenshtein.ratio(reference_text.lower(), user_text.lower()) * 100
    return round(similarity, 2)

@app.route('/available_languages', methods=['GET'])
def available_languages():
    """Return the languages available for translation."""
    return jsonify(AVAILABLE_LANGUAGES)

@app.route('/process_audio', methods=['POST'])
def process_audio():
    """Process the audio, translate the text and evaluate the pronunciation."""
    try:
        # Step 1: get the target language from the request
        target_lang = request.json.get('target_lang')
        if not target_lang:
            return jsonify({"error": "Missing 'target_lang' parameter"}), 400
        if target_lang not in AVAILABLE_LANGUAGES:
            return jsonify({
                "error": f"Target language '{target_lang}' is not supported.",
                "available_languages": AVAILABLE_LANGUAGES  # Return the list of available languages
            }), 400

        # Step 2: translate the initial text
        text = record_audio()
        if not text:
            return jsonify({"error": "No audio detected or transcription failed"}), 400

        wit_response = send_to_wit(text)
        print("Wit.ai Response:", wit_response)

        translation = translate_text(text, target_lang)
        speak_translation(translation, lang=target_lang)

        # Step 3: repetition loop to evaluate the pronunciation
        score = 0
        while score < 80:
            repeat_text = record_audio()
            if not repeat_text:
                return jsonify({"error": "No repeated audio detected"}), 400

            score = calculate_score(translation, repeat_text)

            if score >= 80:
                message = "Bravo! Congratulations, you are a genius!"
                return jsonify({
                    "original_text": text,
                    "wit_response": wit_response,
                    "translated_text": translation,
                    "repeated_text": repeat_text,
                    "score": score,
                    "message": message
                }), 200
            elif score < 45:
                message = "Your score is low; keep practising to improve."
            else:
                message = "Not bad! You can still improve."

            return jsonify({
                "translated_text": translation,
                "repeated_text": repeat_text,
                "score": score,
                "message": message,
                "retry": True
            })

    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    app.run(debug=True)

"""
You can test with this code in the browser: uncomment it and put it where it belongs.

@app.route('/process_audio', methods=['GET', 'POST'])
def process_audio():
    if request.method == 'GET':
        return jsonify({"message": "Use a POST request to process the audio."})
    # Continue with the POST logic
    try:
        text = record_audio()
        if not text:
            return jsonify({"error": "No audio detected or transcription failed"}), 400

        wit_response = send_to_wit(text)
        print("Wit.ai Response:", wit_response)

        target_lang = request.json.get('target_lang', 'sw')
        translation = translate_text(text, target_lang)
        speak_translation(translation, lang=target_lang)

        return jsonify({
            "original_text": text,
            "wit_response": wit_response,
            "translated_text": translation
        }), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500
"""
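Once the Flask app is running (by default on http://127.0.0.1:5000 when this file is started directly), you can exercise both routes from a separate script; a minimal sketch assuming the default host and port:

import requests

BASE_URL = "http://127.0.0.1:5000"  # Flask development server default (assumption)

# List the supported target languages
print(requests.get(f"{BASE_URL}/available_languages").json())

# Start a session targeting Swahili; the server records from its own microphone
response = requests.post(f"{BASE_URL}/process_audio", json={"target_lang": "sw"})
print(response.json())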
Designing a bot to solve complex problems in our daily lives is becoming easier and easier. However, that does not diminish the importance of learning languages yourself. Technologies like this Wit.ai-powered bot for instant voice translation should primarily serve to enrich our interactions in complex contexts. By combining these tools with personal language learning, we foster more effective communication while preserving individual linguistic richness.
The code is available on my GitHub.