How to use the Azure Speech-to-Text API to get a transcription result for Arabic audio

Post by Guest »

I am using the Azure Speech-to-Text API to get text from audio, and the input is Arabic audio. The text the API returns does not match the speech in the audio file. How can I improve the result so that the transcript matches what is actually spoken, and how should I handle this?
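From what I understand, two settings matter most for this: the recognition locale has to match the audio, and detailed output has to be enabled for word-level results. Below is a minimal sketch of that configuration; the key, region, the "ar-EG" locale, and the phrase-list entry are placeholders, not values from my project:

Code: Select all

using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;
using Microsoft.CognitiveServices.Speech.Transcription;

// Placeholder key/region; substitute your own Speech resource values.
var speechConfig = SpeechConfig.FromSubscription("<your-key>", "<your-region>");

// Pin the recognizer to the Arabic locale that matches the recordings
// (e.g. "ar-EG", "ar-SA"); a wrong or default locale is a common cause
// of transcripts that do not match the speech.
speechConfig.SpeechRecognitionLanguage = "ar-EG";

// Detailed output is required for word-level results (e.Result.Best()).
speechConfig.OutputFormat = OutputFormat.Detailed;

var audioConfig = AudioConfig.FromWavFileInput("sample.wav");
var transcriber = new ConversationTranscriber(speechConfig, audioConfig);

// Optional: bias recognition toward known domain terms (hypothetical entry).
var phraseList = PhraseListGrammar.FromRecognizer(transcriber);
phraseList.AddPhrase("اسم المنتج");

If locale pinning and a phrase list are not enough, Custom Speech (training a model on your own Arabic audio and transcripts) is the usual next step.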
This is my upload action, which saves the audio and sends it to the speech-to-text API in Azure:

Code: Select all

using Microsoft.AspNetCore.Mvc;
using Microsoft.EntityFrameworkCore;
using SpeechAnalytics.Backend.Services;
using SpeechAnalytics.Backend.Services.backgroundTask;
using SpeechAnalytics.Backend.ViewModel;
using SpeechAnalytics.Core.Entities;

namespace SpeechAnalytics.Backend.Controllers
{
    public class AudioController : BaseController
    {
        private readonly IWebHostEnvironment _webHostEnvironment;
        private readonly SpeechWordsCalculateService _speechWordsService;
        // private TranscriptionConvertService _transcription;
        private AudioTranscriptionTask _audioTranscriptionTask;

        public AudioController(AudioTranscriptionTask audioTranscriptionTask, IWebHostEnvironment webHostEnvironment, SpeechWordsCalculateService speechWordsService)
        {
            // _transcription = transcription;
            _audioTranscriptionTask = audioTranscriptionTask;
            _webHostEnvironment = webHostEnvironment;
            _speechWordsService = speechWordsService;
        }

        public IActionResult UpladAudio()
        {
            AudioVM model = new() { Date = DateTime.Now };
            return View(model);
        }

        // Done action
        [HttpPost]
        public async Task<IActionResult> UpladAudio(AudioVM audio)
        {
            #region PreviousNotEnqueueCode
            // Save the audio information to the database
            //var model = new Audio
            //{
            //    FilePath = audio.FilePath,
            //    AudioName = audio.AudioName,
            //    audioStatus = AudioStatus.WaitTranscribing,
            //    UserId = CurrentUserData.UserId,
            //    Date = audio.Date
            //};
            //await _context.Audios.AddAsync(model);
            //await _context.SaveChangesAsync();

            //string attachmentFolderPath = Path.Combine(_webHostEnvironment.WebRootPath, "Attachments");
            //string audioFilePath = Path.Combine(attachmentFolderPath, audio.FilePath);
            //var res = await _transcription.ConversationTranscriber(audioFilePath);
            //if (res.Success == true)
            //{
            //    model.audioStatus = AudioStatus.Transcribed;
            //    _context.Audios.Update(model);

            //    var TranscriptionModel = new AudioTranscription()
            //    {
            //        AudioId = model.Id,
            //        Transcription = JsonConvert.SerializeObject(res.Transcriptions),
            //        IsDeleted = false
            //    };

            //    _context.AudioTranscriptions.Add(TranscriptionModel);
            //}
            //else
            //{
            //    model.audioStatus = AudioStatus.NotTranscribed;
            //    _context.Audios.Update(model);
            //}
            //_context.SaveChanges();
            #endregion

            var StandreadQuota = TimeSpan.FromSeconds(_context.Settings.FirstOrDefault().QuotaSystemSeconds);
            var audioDurationSum = (int)_context.Audios
                .Where(a => a.UserId == CurrentUserData.UserId && a.audioStatus == AudioStatus.Transcribed && a.IsDeleted != true)
                .Sum(a => a.audioDuration);
            var ReminingQuota = (int)Math.Round(StandreadQuota.Subtract(TimeSpan.FromSeconds(audioDurationSum)).TotalSeconds);
            var audioDuration = (int)GetAudioDuration(Path.Combine(_webHostEnvironment.WebRootPath, "Attachments", audio.FilePath));

            // Save the audio information to the database
            var model = new Audio
            {
                FilePath = audio.FilePath,
                AudioName = audio.AudioName,
                audioDuration = GetAudioDuration(Path.Combine(_webHostEnvironment.WebRootPath, "Attachments", audio.FilePath)),
                audioStatus = AudioStatus.WaitTranscribing,
                UserId = CurrentUserData.UserId,
                Date = audio.Date
            };

            if (audioDuration > ReminingQuota)
            {
                return Json(-1);
            }

            await _context.Audios.AddAsync(model);
            await _context.SaveChangesAsync();

            // Enqueue the audio transcription task as a background task
            //Task.Run(() => _audioTranscriptionTask.ExecuteAsync(Path.Combine(_webHostEnvironment.WebRootPath, "Attachments", audio.FilePath), model.Id, model.UserId, audioDuration));

            // Fire-and-forget: exceptions thrown inside this lambda are unobserved.
            Task.Run(async () =>
            {
                await _audioTranscriptionTask.ExecuteAsync(Path.Combine(_webHostEnvironment.WebRootPath, "Attachments", audio.FilePath), model.Id, model.UserId, audioDuration);
                await _speechWordsService.CalculateSpeechWordsInAllTranscription(model.UserId);
            });

            return Json(1);
        }
    }
}
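A side note on the Task.Run call above: it is fire-and-forget, so if ExecuteAsync throws or the host shuts down, the transcription is silently lost. A minimal sketch of a queue-plus-BackgroundService alternative follows; TranscriptionJob, TranscriptionQueue, and TranscriptionWorker are hypothetical names, not types from my project, and this assumes AudioTranscriptionTask can be resolved as a singleton:

Code: Select all

using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Channels;
using System.Threading.Tasks;
using Microsoft.Extensions.Hosting;

// Hypothetical job payload matching the arguments passed to ExecuteAsync.
public record TranscriptionJob(string AudioFilePath, int AudioId, int UserId, int AudioDuration);

public class TranscriptionQueue
{
    private readonly Channel<TranscriptionJob> _channel = Channel.CreateUnbounded<TranscriptionJob>();

    public ValueTask EnqueueAsync(TranscriptionJob job) => _channel.Writer.WriteAsync(job);

    public IAsyncEnumerable<TranscriptionJob> DequeueAllAsync(CancellationToken ct) => _channel.Reader.ReadAllAsync(ct);
}

// A BackgroundService keeps the work inside the host's lifetime, unlike bare Task.Run.
public class TranscriptionWorker : BackgroundService
{
    private readonly TranscriptionQueue _queue;
    private readonly AudioTranscriptionTask _task;

    public TranscriptionWorker(TranscriptionQueue queue, AudioTranscriptionTask task)
    {
        _queue = queue;
        _task = task;
    }

    protected override async Task ExecuteAsync(CancellationToken stoppingToken)
    {
        await foreach (var job in _queue.DequeueAllAsync(stoppingToken))
        {
            try
            {
                await _task.ExecuteAsync(job.AudioFilePath, job.AudioId, job.UserId, job.AudioDuration);
            }
            catch (Exception ex)
            {
                // Log instead of losing the exception silently.
                Console.Error.WriteLine($"Transcription of audio {job.AudioId} failed: {ex.Message}");
            }
        }
    }
}

Both would be registered at startup, e.g. services.AddSingleton<TranscriptionQueue>() and services.AddHostedService<TranscriptionWorker>().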
The upload action enqueues an ExecuteAsync call; that method in turn calls the service method that connects to the Azure Speech-to-Text API. Here is its code (the post is cut off partway through, so the second half of the block is the visible part of the ConversationTranscriber service method):

Code: Select all

public async Task ExecuteAsync(string audioFilePath, int audioId, int userId, int audioDuration)
{
    Audio audio = new();
    var result = await _transcriptionService.ConversationTranscriber(audioFilePath);

    using (var scope = new TransactionScope(TransactionScopeAsyncFlowOption.Enabled))
    {
        var dbContextOptions = new DbContextOptionsBuilder()
            .UseSqlServer("Server=10.1.1.210;Database=SpeechAnalyticsDB;User Id=sa;Password=sa_2014;TrustServerCertificate=true;")
            .Options;

        using (var context = new SpeechAnalyticsDbContext(dbContextOptions))
        {
            audio = await context.Audios.FindAsync(audioId);

            #region Calculate Quota
            var StandreadQuota = TimeSpan.FromSeconds(context.Settings.FirstOrDefault().QuotaSystemSeconds);
            var audioDurationSum = (int)context.Audios
                .Where(a => a.UserId == userId && a.audioStatus == AudioStatus.Transcribed && a.IsDeleted != true)
                .Sum(a => a.audioDuration);
            var ReminingQuota = (int)Math.Round(StandreadQuota.Subtract(TimeSpan.FromSeconds(audioDurationSum)).TotalSeconds);
            #endregion

            //var User = context.Users.Find(UserId);
            if (result.Success)
            {
                // The condition was truncated in the post; "<= ReminingQuota" matches the controller's quota check.
                if (audioDuration <= ReminingQuota)
                {
                    // ... the rest of ExecuteAsync is cut off in the post ...
                }
            }
        }
    }
}

// The remainder of the posted code is the visible part of the service method that
// calls the Speech SDK (_transcriptionService.ConversationTranscriber):
try
{
    conversationTranscriber.Transcribed += (s, e) =>
    {
        if (e.Result.Reason == ResultReason.RecognizedSpeech)
        {
            var detailedResults = e.Result.Best();
            var words = new List<WordTimestampVM>(); // generic type argument was stripped in the post

            if (detailedResults != null && detailedResults.Any())
            {
                var bestResults = detailedResults.ToList()[0];

                transcription = new TranscriptionVM
                {
                    SpeakerId = e.Result.SpeakerId,
                    Text = e.Result.Text,
                    StartTime = e.Result.OffsetInTicks / (10_000_000d * 60),
                    Duration = e.Result.Duration.Ticks / (10_000_000d * 60),
                    Words = bestResults.Words?.Select(a => new WordTimestampVM()
                    {
                        Word = a?.Word
                    }).ToList(),
                };
            }
            transcriptions.Add(transcription);
        }
    };

    conversationTranscriber.Canceled += (s, e) =>
    {
        // Handle cancellation scenario if needed
    };

    conversationTranscriber.SessionStopped += (s, e) =>
    {
        // Handle session stopped scenario if needed
        stopRecognition.TrySetResult(0);
    };

    await conversationTranscriber.StartTranscribingAsync();

    // Wait for completion
    await stopRecognition.Task;

    await conversationTranscriber.StopTranscribingAsync();

    // Check if transcriptions were generated
    if (transcriptions.Count > 0)
    {
        var response = new
        {
            Success = true,
            Transcriptions = transcriptions
        };

        return response;
    }
    else
    {
        var response = new
        {
            Success = false,
            Message = "Transcription failed. No transcriptions were generated."
        };

        return response;
    }
}
catch (Exception ex)
{
    // Handle any exceptions that occur during transcription
    var response = new
    {
        Success = false,
        Message = "Transcription failed: " + ex.Message
    };

    return response;
}
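For completeness, the scaffolding the post cuts off around that fragment follows the standard Speech SDK pattern: a TaskCompletionSource that the SessionStopped handler completes, and a ConversationTranscriber built from the config. A minimal self-contained sketch, assuming a recent SDK version where ConversationTranscriber accepts a SpeechConfig directly; key, region, and locale are placeholders:

Code: Select all

using System;
using System.Threading.Tasks;
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;
using Microsoft.CognitiveServices.Speech.Transcription;

public static async Task TranscribeFileAsync(string audioFilePath)
{
    // Placeholder credentials; locale and Detailed output as discussed near the top of this post.
    var speechConfig = SpeechConfig.FromSubscription("<key>", "<region>");
    speechConfig.SpeechRecognitionLanguage = "ar-EG";
    speechConfig.OutputFormat = OutputFormat.Detailed; // without this, e.Result.Best() has no data

    using var audioConfig = AudioConfig.FromWavFileInput(audioFilePath);
    using var conversationTranscriber = new ConversationTranscriber(speechConfig, audioConfig);

    // Completed by the Canceled/SessionStopped handlers so the method can await the end of the file.
    var stopRecognition = new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);

    conversationTranscriber.Transcribed += (s, e) =>
    {
        if (e.Result.Reason == ResultReason.RecognizedSpeech)
            Console.WriteLine($"{e.Result.SpeakerId}: {e.Result.Text}");
    };
    conversationTranscriber.Canceled += (s, e) => stopRecognition.TrySetResult(0);
    conversationTranscriber.SessionStopped += (s, e) => stopRecognition.TrySetResult(0);

    await conversationTranscriber.StartTranscribingAsync();
    await stopRecognition.Task;
    await conversationTranscriber.StopTranscribingAsync();
}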
