
Creating Call Center Applications with Unified Communications SDKs: Code Listing (Part 4 of 4)

Summary:   Learn how to use Unified Communications SDKs to implement speech synthesis, speech recognition, and call control technologies to create a simple call center application.

Applies to:   Microsoft Unified Communications Managed API (UCMA) 3.0 Core SDK or Microsoft Unified Communications Managed API 4.0 SDK | Microsoft Lync 2010 SDK | Microsoft Speech Platform SDK (x64) version 10.2

Published:   April 2012 | Provided by:   John Clarkson and Mark Parker, Microsoft


This is the last in a series of four articles about how to create a call center application.

The following example contains the code from the Program.cs file in the bot application.

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

//added
using System.Threading;
using Microsoft.Rtc.Collaboration;
using Microsoft.Rtc.Collaboration.AudioVideo;
using Microsoft.Rtc.Signaling;
using Microsoft.Rtc.Collaboration.Sample.Common;
using System.Configuration;
using Microsoft.Speech.Synthesis;
using Microsoft.Speech.Recognition;
using Microsoft.Speech.AudioFormat;

namespace SimpleCallCenter
{
    class Program
    {

        #region Globals
        // The URI of the user to whom the call is transferred.
        private String _transferUserURI;
        private UCMASampleHelper _helper;
        private UserEndpoint _userEndpoint;
        private AudioVideoCall _inboundAVCall, _outboundAVCall;
        private Microsoft.Rtc.Collaboration.Conversation _conversation;
        private ConversationContextChannel _channel;
        private CollaborationPlatform _collabPlatform;
        private AudioVideoFlow _audioVideoFlow;
        private string _name;
        private string _department;
        private SpeechSynthesisConnector _speechSynthesisConnector;
        private SpeechRecognitionConnector _speechRecognitionConnector;
        private SpeechSynthesizer _speechSynthesizer;
        private SpeechRecognitionEngine _speechRecognitionEngine;
        
        // Wait handles are used to synchronize the main thread and worker threads.
        private AutoResetEvent _waitForCallToBeTransferred = new AutoResetEvent(false);
        private AutoResetEvent _waitForCallToBeAccepted = new AutoResetEvent(false);
        private AutoResetEvent _waitForCallToBeEstablished = new AutoResetEvent(false);
        private AutoResetEvent _waitForShutdownEventCompleted = new AutoResetEvent(false);
        private AutoResetEvent _waitForRecoCompleted = new AutoResetEvent(false);
  
        #endregion

        static void Main(string[] args)
        {
            Program program = new Program();
            program.Run();
        }

        private void Run()
        {
            _helper = new UCMASampleHelper();
            _userEndpoint = _helper.CreateEstablishedUserEndpoint("Forwarding User" /*endpointFriendlyName*/);

            _userEndpoint.RegisterForIncomingCall<AudioVideoCall>(AudioVideoCall_Received);
            Console.WriteLine("Waiting for incoming call...");
            _waitForCallToBeTransferred.WaitOne();
            UCMASampleHelper.PauseBeforeContinuing("Press ENTER to shut down and exit.");

            // Shut down the sample.
            _helper.ShutdownPlatform();
        }

        // Query caller for name and department.
        // This method is called from the AudioVideoCall_Received event handler.
        private void GetCallerInfo()
        {
            PromptUserForDepartment();
            RecognizeDepartmentName(); 
        }

        public void PromptUserForDepartment()
        {
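            // Attach a speech synthesis connector to the audio flow of the inbound call and speak the department prompt.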
            // Cache the Flow from the incoming call.
            _audioVideoFlow = _inboundAVCall.Flow;

            // Create a speech synthesis connector and attach the AudioVideoFlow instance to it.
            _speechSynthesisConnector = new SpeechSynthesisConnector();

            _speechSynthesisConnector.AttachFlow(_audioVideoFlow);

            _speechSynthesizer = new SpeechSynthesizer();
            SpeechAudioFormatInfo audioformat = new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);
            _speechSynthesizer.SetOutputToAudioStream(_speechSynthesisConnector, audioformat);

            // Register for notification of the SpeakCompleted and SpeakStarted events on the speech synthesizer.
            _speechSynthesizer.SpeakStarted += new EventHandler<SpeakStartedEventArgs>(_speechSynthesizer_SpeakStarted);
            _speechSynthesizer.SpeakCompleted += new EventHandler<SpeakCompletedEventArgs>(_speechSynthesizer_SpeakCompleted);

            // Start the speech synthesis connector.
            _speechSynthesisConnector.Start();
            _speechSynthesizer.Speak("Hello, can I connect you to billing or sales");
        }

 
        public void RecognizeDepartmentName()
        {
            // Create a speech recognition connector and attach it to an AudioVideoFlow.
            _speechRecognitionConnector = new SpeechRecognitionConnector();
            _speechRecognitionConnector.AttachFlow(_audioVideoFlow);

            // Start recognizing.
            SpeechRecognitionStream stream = _speechRecognitionConnector.Start();

            // Create a speech recognition engine and start recognizing by attaching the connector to the recognition engine.
            _speechRecognitionEngine = new SpeechRecognitionEngine();
            _speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(speechRecognitionEngine_SpeechRecognized);
            _speechRecognitionEngine.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(_speechRecognitionEngine_SpeechRecognitionRejected);


            string[] departmentString = { "Sales", "Billing" };
            Choices departmentGrammar = new Choices(departmentString);
            _speechRecognitionEngine.LoadGrammar(new Grammar(new GrammarBuilder(departmentGrammar)));
            _speechRecognitionEngine.Grammars[0].Name = "department";

            string[] nameString = { "Terry Adams", "Toni Poe" };
            Choices nameGrammar = new Choices(nameString);
            _speechRecognitionEngine.LoadGrammar(new Grammar(new GrammarBuilder(nameGrammar)));
            _speechRecognitionEngine.Grammars[1].Name = "name";

            SpeechAudioFormatInfo speechAudioFormatInfo = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, Microsoft.Speech.AudioFormat.AudioChannel.Mono);
            _speechRecognitionEngine.SetInputToAudioStream(stream, speechAudioFormatInfo);
            Console.WriteLine("\r\nGrammar loaded.");

            _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

            _waitForRecoCompleted.WaitOne();
        }

        public void AskCallerName()
        {
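            // Prompt the caller for a name, and then listen for the response.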
            Console.WriteLine("AskCallerName()");
            _speechSynthesizer.Speak("And who shall I say is calling?");
            RecognizeName();
        }
        
        public void RecognizeName()
        {
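            // Restart speech recognition so that the caller's spoken name can be recognized.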
            Console.WriteLine("\r\nName grammar is active; listening for the caller's name.");

            try { _speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple); }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }

         
        // Send caller info to the agent.
        private void SendCallerInfo(AudioVideoCall call)
        {
            Conversation conversation = call.Conversation;
            ParticipantEndpoint endpoint = call.RemoteEndpoint;
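            // The application GUID must match the GUID that the agent application is registered with.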
            Guid AppId = new Guid("client application GUID");
            _channel = new ConversationContextChannel(conversation, endpoint);
            ConversationContextChannelEstablishOptions options = new ConversationContextChannelEstablishOptions();
            options.ContextualData = SetCallerInfo();
            _channel.BeginEstablish(AppId, options, BeginContextEstablishCB, null);
        }

        // Determine what caller info is sent to the agent.
        // In a "real" application, the information would come from a database.
        private string SetCallerInfo()
        {
            string customerData = null;

            if (_name == "Toni Poe")
            {
                customerData = "3;Toni Poe;3456.00";
            }
            if (_name == "Terry Adams")
            {
                customerData = "4;Terry Adams;6789.88";
            }

            return customerData;
        }

        public void SpeechPlatformShutDown()
        {
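            // Display the recognized department and name, and then stop recognition and shut down the platform.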
            Console.WriteLine("department: " + _department);
            Console.WriteLine("name: " + _name);
            Console.Read();

            // Stop the connector.
            _speechRecognitionConnector.Stop();
            Console.WriteLine("Stopping the speech recognition connector");

            // The speech recognition connector must be detached from the flow; otherwise, if the connector is rooted, it will keep the flow in memory.
            _speechRecognitionConnector.DetachFlow();

            // Shut down the platform.
            ShutdownPlatform();

            _waitForShutdownEventCompleted.WaitOne();
        }

        #region EVENT HANDLERS
        void AudioVideoCall_Received(object sender, CallReceivedEventArgs<AudioVideoCall> e)
        {
            // Type checking was done by the platform; there is no risk of this being any
            // type other than the type expected.
            _inboundAVCall = e.Call;

            // Call: StateChanged: Only hooked up for logging, to show the call 
            // state transitions.
            _inboundAVCall.StateChanged += new
                EventHandler<CallStateChangedEventArgs>(inboundAVCall_StateChanged);

            Console.WriteLine("Call Received!");

            // Accept the call. Before the call can be transferred, it must be in the Established state.
            // Note that BeginEstablish is called on outgoing calls, not on incoming calls.

            _inboundAVCall.BeginAccept(BeginAcceptCB, _inboundAVCall);

            // Wait for the call to be accepted, putting it into the Established state.
            _waitForCallToBeAccepted.WaitOne(2000);
            Console.WriteLine("Inbound call state is {0}\n", _inboundAVCall.State);

            // Query caller for department and name.
            GetCallerInfo();

            // Which department should we transfer to?
            if(_department == "Sales")
                _transferUserURI = "SalesDepartmentURI";
            if (_department == "Billing")
                _transferUserURI = "BillingDepartmentURI";
            
            CallTransferOptions basicTransferOptions;
            IAsyncResult result;

            // Begin call transfer. 
            _conversation = new Conversation(_userEndpoint);
            _outboundAVCall = new AudioVideoCall(_conversation);
            _outboundAVCall.BeginEstablish(_transferUserURI, null, BeginEstablishCB, _outboundAVCall);
            _waitForCallToBeEstablished.WaitOne();

            basicTransferOptions = new CallTransferOptions(CallTransferType.Attended);
            result = _inboundAVCall.BeginTransfer(_outboundAVCall, basicTransferOptions, BeginTransferCB, _inboundAVCall);
            Console.WriteLine("Supervised transfer.");

            // Send contextual data.
            SendCallerInfo(_outboundAVCall);
        }

        // Handler for the StateChanged event on the AudioVideoFlow object.
        private void audioVideoFlow_StateChanged(object sender, MediaFlowStateChangedEventArgs e)
        {
            // When flow is active, media operations can begin.
            if (e.State == MediaFlowState.Terminated)
            {
                // Detach SpeechRecognitionConnector, since AVFlow is terminated.
                AudioVideoFlow avFlow = (AudioVideoFlow)sender;
                if (avFlow.SpeechRecognitionConnector != null)
                {
                    avFlow.SpeechRecognitionConnector.DetachFlow();
                }
            }
        }

        // Handler for the StateChanged event on the inbound AudioVideoCall object.
        void inboundAVCall_StateChanged(object sender, CallStateChangedEventArgs e)
        {
            Console.WriteLine("Inbound call has changed state.\n Previous call state: " + e.PreviousState +
                "\n Current call state: " + e.State);
        }

        // Handler for the SpeechRecognized event on the speech recognition engine.
        // This method determines which grammar was used (department grammar or user name grammar)
        // and sets either _department or _name accordingly.
        void speechRecognitionEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            RecognitionResult result = e.Result;
            if (result != null)
            {
                Console.WriteLine("Speech recognized: " + result.Text);

                if (result.Grammar.Name == "department")
                {
                    _department = result.Text;
                    AskCallerName();
                }

                else if (result.Grammar.Name == "name")
                {
                    _name = result.Text;
                    _waitForRecoCompleted.Set();
                    SpeechPlatformShutDown();
                }

                else
                    Console.WriteLine("Speech not name or dept: " + result.Text);
            }
        }

        void _speechSynthesizer_SpeakCompleted(object sender, SpeakCompletedEventArgs e)
        {
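            // Raised when the synthesizer finishes speaking a prompt; used here only for logging.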
            Console.WriteLine("_speechSynthesizer_SpeakCompleted event handler");
        }

        void _speechSynthesizer_SpeakStarted(object sender, SpeakStartedEventArgs e)
        {
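            // Raised when the synthesizer starts speaking a prompt; used here only for logging.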
            Console.WriteLine("_speechSynthesizer_SpeakStarted event handler");
        }

        void _speechRecognitionEngine_SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
        {
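            // Raised when the recognition engine cannot match the caller's speech to a loaded grammar.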
            Console.WriteLine("Speech reco rejected: " + e.Result.Text);
        }

        #endregion

        #region CALLBACK METHODS
        // Callback for BeginAccept on the incoming call.
        void BeginAcceptCB(IAsyncResult ar)
        {
            _inboundAVCall.EndAccept(ar);
            Console.WriteLine("Inbound call is accepted.\n");
        }

        // Callback for BeginEstablish on the outgoing call.
        void BeginEstablishCB(IAsyncResult ar)
        {
            _outboundAVCall.EndEstablish(ar);
            Console.WriteLine("Outbound call is established.\n");
            _waitForCallToBeEstablished.Set();
        }

        // Callback for BeginTransfer on the incoming call.
        void BeginTransferCB(IAsyncResult ar)
        {
            try
            {
                _inboundAVCall.EndTransfer(ar);
                Console.WriteLine("Transfer completed.\n");
                _waitForCallToBeTransferred.Set();
            }
            catch (FailureResponseException fre)
            {
                Console.WriteLine("Transfer failed: " + fre.ToString() + "\n");
            }
            catch (OperationTimeoutException ote)
            {
                Console.WriteLine("Transfer failed: " + ote.ToString() + "\n");
            }
            catch (OperationFailureException ofe)
            {
                Console.WriteLine("Transfer failed: " + ofe.ToString() + "\n");
            }
            catch (FailureRequestException fre)
            {
                Console.WriteLine("Transfer failed: " + fre.ToString() + "\n");
            }

        }

        // Callback for BeginContextEstablish on the context channel.
        void BeginContextEstablishCB(IAsyncResult res)
        {
            if (_channel.State == ConversationContextChannelState.Establishing)
            {
                Console.WriteLine("Context channel is in the Establishing state");
                _channel.EndEstablish(res);
            }
        }

        #endregion

 
        #region PLATFORM CLEANUP
        private void ShutdownPlatform()
        {
            // Shut down the platform.
            _collabPlatform = _audioVideoFlow.Call.Conversation.Endpoint.Platform;
            _collabPlatform.BeginShutdown(EndPlatformShutdown, _collabPlatform);
        }

        private void EndPlatformShutdown(IAsyncResult ar)
        {
            CollaborationPlatform collabPlatform = ar.AsyncState as CollaborationPlatform;

            // Shutdown actions will not throw.
            collabPlatform.EndShutdown(ar);
            Console.WriteLine("The platform is now shutdown.");

            // Again, just to sync the completion of the code and the platform teardown.
            _waitForShutdownEventCompleted.Set();
        }
        #endregion

    }
}

The following examples show the code used to create the agent application.

Silverlight XAML Page

The following example shows the Microsoft Silverlight page .xaml file. This XAML creates a button that is attached to a Click event handler in the code-behind, and three text boxes, each with a label, that display customer data.

<UserControl x:Class="SilverlightApplication4.MainPage"
    xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
    xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
    mc:Ignorable="d"
    d:DesignHeight="300" d:DesignWidth="400">

    <Grid x:Name="LayoutRoot">

        <TextBox Height="23" HorizontalAlignment="Left" Margin="21,74,0,0" Name="textBoxCustomerID" VerticalAlignment="Top" Width="120" />
        <TextBox Height="23" HorizontalAlignment="Left" Margin="21,128,0,0" Name="textBoxCustomerName" VerticalAlignment="Top" Width="120" />
        <TextBox Height="23" HorizontalAlignment="Left" Margin="21,179,0,0" Name="textBoxCustomerBalance" VerticalAlignment="Top" Width="120" />
        <TextBlock Height="23" HorizontalAlignment="Left" Margin="173,78,0,0" Name="textBlock1" Text="ID" VerticalAlignment="Top" />
        <TextBlock Height="23" HorizontalAlignment="Left" Margin="173,132,0,0" Name="textBlock2" Text="Name" VerticalAlignment="Top" />
        <TextBlock Height="23" HorizontalAlignment="Left" Margin="173,179,0,0" Name="textBlock3" Text="Balance" VerticalAlignment="Top" />
        <Button Click="GetUserInfo_Click"  Content="GetUserInfo" Height="23" HorizontalAlignment="Left" Margin="21,33,0,0" Name="button1" VerticalAlignment="Top" Width="75" />
    </Grid>
</UserControl>

Silverlight Code-Behind Page

The following example shows the content of the Silverlight page code-behind file. This code obtains context data sent by the bot application, parses the data, and displays it in Silverlight controls on the XAML page.

using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Animation;
using System.Windows.Shapes;

//added
using Microsoft.Lync.Model.Extensibility;
using Microsoft.Lync.Model.Conversation;

namespace SilverlightApplication4
{
    public partial class MainPage : UserControl
    {
        public MainPage()
        {
            InitializeComponent();
        }

        private void GetUserInfo_Click(object sender, RoutedEventArgs e)
        {
            //This GUID registers the application on this computer.
            //It is identical to the GUID for the bot application.
            string appId = "your application GUID";

            //Get the Conversation object from the Lync client.
            Conversation conversation = (Conversation)Microsoft.Lync.Model.LyncClient.GetHostingConversation();

            //The GetApplicationData method gets the data
            //sent from the bot. This is the information about the
            //caller.
            string text = conversation.GetApplicationData(appId);

            //Parse the string passed in as contextual data.
            char[] delimiter = { ';' };
            string[] custData = text.Split(delimiter);

            //Display array elements in text boxes.
            textBoxCustomerID.Text = custData[0];
            textBoxCustomerName.Text = custData[1];
            textBoxCustomerBalance.Text = custData[2];
        }
    }
}

By combining speech synthesis and speech recognition technologies with the call management features that are provided in the UCMA 3.0 Core SDK, you can create a simple call center application.

Mark Parker is a programming writer at Microsoft Corporation. His principal responsibility is the UCMA SDK documentation.

John Clarkson is also a programming writer on the Microsoft Lync feature team.
