//
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
import Foundation
// Assumed module names for the gRPC client (Channel, Call, Metadata) and the
// dynamic-proto helper (FileDescriptorSet) used below; adjust them to match
// the dependencies in your project.
import gRPC
import QuickProto

let API_KEY: String = "YOUR_API_KEY"
let HOST = "speech.googleapis.com"

typealias SpeechRecognitionCompletionHandler = (Any?, NSError?) -> Void
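
/// Streams captured audio to the Cloud Speech API's bidirectional
/// StreamingRecognize method over gRPC and delivers each
/// StreamingRecognizeResponse to the caller's completion handler.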
class SpeechRecognitionService {
  var sampleRate: Int = 16000
  private var nowStreaming = false

  var fileDescriptorSet: FileDescriptorSet
  var channel: Channel
  var call: Call?

  var completion: SpeechRecognitionCompletionHandler!

  static let sharedInstance = SpeechRecognitionService()

  private init() {
    // "speech.out" is a compiled proto descriptor set bundled with the app; it
    // lets Speech API messages be built at runtime without generated code.
    fileDescriptorSet = FileDescriptorSet(filename: "speech.out")
    channel = Channel(address: HOST, certificates: nil, host: nil)
  }
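
  /// Sends a chunk of audio to the Speech API. On the first chunk this opens
  /// the gRPC call and sends the streaming configuration before the audio;
  /// subsequent chunks are sent on the already-open stream.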
  func streamAudioData(_ audioData: NSData, completion: @escaping SpeechRecognitionCompletionHandler) {
    self.completion = completion
    do {
      if !nowStreaming {
        // Not streaming yet: open a gRPC call and send the streaming
        // configuration as the first request on the stream.
        call = channel.makeCall("/google.cloud.speech.v1beta1.Speech/StreamingRecognize")
        if let call = call {
          let metadata = Metadata(["x-goog-api-key": API_KEY,
                                   "x-ios-bundle-identifier": Bundle.main.bundleIdentifier!])
          try call.start(metadata: metadata)

          // RecognitionConfig: LINEAR16 encoding (enum value 1), the capture
          // sample rate, US English, and up to 30 alternative transcripts.
          let recognitionConfig = fileDescriptorSet.makeMessage("RecognitionConfig")!
          recognitionConfig.addField("encoding", value: 1)
          recognitionConfig.addField("sample_rate", value: self.sampleRate)
          recognitionConfig.addField("language_code", value: "en-US")
          recognitionConfig.addField("max_alternatives", value: 30)

          let streamingRecognitionConfig = fileDescriptorSet.makeMessage("StreamingRecognitionConfig")!
          streamingRecognitionConfig.addField("config", value: recognitionConfig)
          streamingRecognitionConfig.addField("single_utterance", value: false)
          streamingRecognitionConfig.addField("interim_results", value: true)

          let streamingRecognizeRequest = fileDescriptorSet.makeMessage("StreamingRecognizeRequest")!
          streamingRecognizeRequest.addField("streaming_config", value: streamingRecognitionConfig)

          let messageData = streamingRecognizeRequest.data()
          _ = call.sendMessage(data: messageData)
          nowStreaming = true
          self.receiveMessage()
        }
      }

      if let call = call {
        // Send the current chunk of audio as an audio_content request.
        let streamingRecognizeRequest = fileDescriptorSet.makeMessage("StreamingRecognizeRequest")!
        streamingRecognizeRequest.addField("audio_content", value: audioData)
        let messageData = streamingRecognizeRequest.data()
        let success = call.sendMessage(data: messageData)
        if !success {
          stopStreaming() // the next chunk will restart the stream
        }
      }
    } catch let error {
      print("Call error: \(error)")
    }
  }
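
  /// Waits for the next StreamingRecognizeResponse on the open call, passes it
  /// to the completion handler, and re-registers itself so responses keep
  /// arriving for as long as the stream is open.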
  func receiveMessage() {
    do {
      if let call = call {
        try call.receiveMessage() { (data) in
          if let data = data {
            // Decode the response and hand it back to the caller.
            if let responseMessage =
              self.fileDescriptorSet.readMessage("StreamingRecognizeResponse", data: data) {
              self.completion(responseMessage, nil)
            }
          }
          // Re-register so the next response on the stream is also received.
          self.receiveMessage()
        }
      }
    } catch let error {
      print("Receive error: \(error)")
    }
  }
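
  /// Closes the gRPC call (if one is open) and marks the service as no longer
  /// streaming, so the next audio chunk starts a fresh stream.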
  func stopStreaming() {
    if !nowStreaming {
      return
    }
    nowStreaming = false
    if let call = call {
      do {
        try call.close {}
      } catch let error {
        print("Call error: \(error)")
      }
    }
  }

  func isStreaming() -> Bool {
    return nowStreaming
  }
}
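
// Illustrative usage from an audio-capture callback. The capture pipeline is
// outside this file; `audioChunk` is a placeholder for an NSData buffer of
// LINEAR16 samples from your recorder.
//
//   let speech = SpeechRecognitionService.sharedInstance
//   speech.sampleRate = 16000
//   speech.streamAudioData(audioChunk) { response, error in
//     if let error = error {
//       print("Speech error: \(error)")
//     } else if let response = response {
//       print("Speech response: \(response)")
//     }
//   }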