Friday, June 27, 2025

Create a generative AI app that uses your own data

https://microsoftlearning.github.io/mslearn-ai-studio/Instructions/04-Use-own-data.html

C#:
// rm -r mslearn-ai-foundry -f
// git clone https://github.com/microsoftlearning/mslearn-ai-studio mslearn-ai-foundry
// cd mslearn-ai-foundry/labfiles/rag-app/c-sharp
// dotnet add package Azure.AI.OpenAI
// dotnet run

using System;
using Azure;
using System.IO;
using System.Text;
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;
using Azure.AI.OpenAI;
using System.ClientModel;
using Azure.AI.OpenAI.Chat;
using OpenAI.Chat;

namespace rag_app
{
    class Program
    {
        static void Main(string[] args)
        {
            // Clear the console
            Console.Clear();

            try
            {
// {
//     "OPEN_AI_ENDPOINT": "https://ai-myhub1588559212155.openai.azure.com/",
//     "OPEN_AI_KEY": "31IQTmnEDSqTsGIrEighShdn3VJrFdVF78JD9fgBiPHrcjVy0aG2JQQJ99BF
        ACHYHv6XJ3w3AAAAACOGXGSQ",
//     "CHAT_MODEL": "gpt-4o",
//     "EMBEDDING_MODEL": "text-embedding-ada-002",
//     "SEARCH_ENDPOINT": "https://rg1aisearchservice1.search.windows.net",
//     "SEARCH_KEY": "3JfXmon3dnBbi9UDymp3kmO5fa1cPdGiQDiNG9xjcTAzSeBX8Wkm",
//     "INDEX_NAME": "brochures-index"
// }

                // Get config settings
                IConfigurationBuilder builder = new ConfigurationBuilder()
                .AddJsonFile("appsettings.json");
                IConfigurationRoot configuration = builder.Build();
                string open_ai_endpoint = configuration["OPEN_AI_ENDPOINT"];
                string open_ai_key = configuration["OPEN_AI_KEY"];
                string chat_model = configuration["CHAT_MODEL"];
                string embedding_model = configuration["EMBEDDING_MODEL"];
                string search_url = configuration["SEARCH_ENDPOINT"];
                string search_key = configuration["SEARCH_KEY"];
                string index_name = configuration["INDEX_NAME"];

                // Get an Azure OpenAI chat client
                AzureOpenAIClient azureClient = new(
                    new Uri(open_ai_endpoint),
                    new AzureKeyCredential(open_ai_key));
                ChatClient chatClient = azureClient.GetChatClient(chat_model);


                // Initialize prompt with system message
                var prompt = new List<ChatMessage>()
                {
                    new SystemChatMessage("You are a travel assistant that provides information on travel services available from Margie's Travel.")
                };

                // Loop until the user types 'quit'
                string input_text = "";
                while (input_text.ToLower() != "quit")
                {
                    // Get user input
                    Console.WriteLine("Enter the prompt (or type 'quit' to exit):");
                    input_text = Console.ReadLine();

                    if (input_text.ToLower() != "quit")
                    {
                        // Add the user input message to the prompt
                        prompt.Add(new UserChatMessage(input_text));

                        // (DataSource is in preview and subject to breaking changes)
                        #pragma warning disable AOAI001

                        // Additional parameters to apply RAG pattern using the AI Search index
                        ChatCompletionOptions options = new();
                        options.AddDataSource(new AzureSearchChatDataSource()
                        {
                            // The following params are used to search the index
                            Endpoint = new Uri(search_url),
                            IndexName = index_name,
                            Authentication = DataSourceAuthentication
                            .FromApiKey(search_key),
                            // The following params are used to vectorize the query
                            QueryType = "vector",
                            VectorizationSource = DataSourceVectorizer
                            .FromDeploymentName(embedding_model),
                        });

                        // Submit the prompt with the data source options and display the response
                        ChatCompletion completion = chatClient.CompleteChat(prompt,
                        options);
                        var completionText = completion.Content[0].Text;
                        Console.WriteLine(completionText);

                        // Add the response to the chat history
                        prompt.Add(new AssistantChatMessage(completionText));

                        #pragma warning restore AOAI001
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
    }
}


Output:


Python:
# rm -r mslearn-ai-foundry -f
# git clone https://github.com/microsoftlearning/mslearn-ai-studio mslearn-ai-foundry
# cd mslearn-ai-foundry/labfiles/rag-app/python
# python -m venv labenv
# ./labenv/bin/Activate.ps1
# pip install -r requirements.txt openai
# code rag-app.py
# python rag-app.py

import os
from dotenv import load_dotenv
from openai import AzureOpenAI

def main():
    # Clear the console
    os.system('cls' if os.name == 'nt' else 'clear')

# OPEN_AI_ENDPOINT="https://ai-myhub1588559212155.openai.azure.com/"
# OPEN_AI_KEY="31IQTmnEDSqTsGIrEighShdn3VJrFdVF78JD9fgBiPHrcjVy0aG2
    JQQJ99BFACHYHv6XJ3w3AAAAACOGXGSQ"
# CHAT_MODEL="gpt-4o"
# EMBEDDING_MODEL="text-embedding-ada-002"
# SEARCH_ENDPOINT="https://rg1aisearchservice1.search.windows.net"
# SEARCH_KEY="3JfXmon3dnBbi9UDymp3kmO5fa1cPdGiQDiNG9xjcTAzSeBX8Wkm"
# INDEX_NAME="brochures-index"

    try:
        # Get configuration settings
        load_dotenv()
        open_ai_endpoint = os.getenv("OPEN_AI_ENDPOINT")
        open_ai_key = os.getenv("OPEN_AI_KEY")
        chat_model = os.getenv("CHAT_MODEL")
        embedding_model = os.getenv("EMBEDDING_MODEL")
        search_url = os.getenv("SEARCH_ENDPOINT")
        search_key = os.getenv("SEARCH_KEY")
        index_name = os.getenv("INDEX_NAME")

        # Get an Azure OpenAI chat client
        chat_client = AzureOpenAI(
            api_version = "2024-12-01-preview",
            azure_endpoint = open_ai_endpoint,
            api_key = open_ai_key
        )

        # Initialize prompt with system message
        prompt = [
            {"role": "system", "content": "You are a travel assistant that provides
            information on travel services available from Margie's Travel."}
        ]

        # Loop until the user types 'quit'
        while True:
            # Get input text
            input_text = input("Enter the prompt (or type 'quit' to exit): ")
            if input_text.lower() == "quit":
                break
            if len(input_text) == 0:
                print("Please enter a prompt.")
                continue

            # Add the user input message to the prompt
            prompt.append({"role": "user", "content": input_text})

            # Additional parameters to apply RAG pattern using the AI Search index
            rag_params = {
                "data_sources": [
                    {
                        # The following params are used to search the index
                        "type": "azure_search",
                        "parameters": {
                            "endpoint": search_url,
                            "index_name": index_name,
                            "authentication": {
                                "type": "api_key",
                                "key": search_key,
                            },
                            # The following params are used to vectorize the query
                            "query_type": "vector",
                            "embedding_dependency": {
                                "type": "deployment_name",
                                "deployment_name": embedding_model,
                            },
                        }
                    }
                ],
            }

            # Submit the prompt with the data source options and display the response
            response = chat_client.chat.completions.create(
                model=chat_model,
                messages=prompt,
                extra_body=rag_params
            )
            completion = response.choices[0].message.content
            print(completion)

            # Add the response to the chat history
            prompt.append({"role": "assistant", "content": completion})

    except Exception as ex:
        print(ex)

if __name__ == '__main__':
    main()
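
Because the data source is applied, the service grounds each answer in the brochures index and normally returns the citations it used alongside the message. A minimal sketch of reading them, assuming the On Your Data response shape (the context/citations fields come from the Azure extension, not the base openai client, so the names may vary by API version); it could be called inside the loop right after the completion is printed:

def print_citations(response):
    # Hedged sketch: the Azure "On Your Data" extension usually attaches a
    # "context" object (with a "citations" list) to the assistant message.
    # The openai client keeps it as an extra field, so dump the message to a
    # plain dict before reading it; nothing is printed if the field is absent.
    message_data = response.choices[0].message.model_dump()
    for citation in message_data.get("context", {}).get("citations", []):
        print(" -", citation.get("title"), citation.get("filepath"))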

Output:



Tuesday, June 24, 2025

Azure WebJob with Timer Trigger and Application Insights Logging in .NET 8


// dotnet add package Microsoft.Azure.WebJobs.Extensions
// dotnet add package Microsoft.Azure.WebJobs.Extensions.Timers
// dotnet add package Microsoft.Extensions.Hosting
// dotnet add package Microsoft.Extensions.Logging.ApplicationInsights

using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Azure.WebJobs;
using Microsoft.Extensions.Logging;
using Microsoft.ApplicationInsights.Extensibility;

var host = new HostBuilder()
    .ConfigureWebJobs(b =>
    {
        b.AddTimers();
    })
    .ConfigureLogging((context, b) =>
    {
        b.AddConsole();
        // Add Application Insights logging
        b.AddApplicationInsights(
            configureTelemetryConfiguration: (config) =>
            {
                // Optionally configure Telemetry here
            },
            configureApplicationInsightsLoggerOptions: (options) => { }
        );
    })
    .ConfigureServices((context, services) =>
    {
        services.AddSingleton<MyFunctions>();
        // Set Application Insights connection string or instrumentation key
        services.Configure<TelemetryConfiguration>((config) =>
        {
            // config.ConnectionString = context.Configuration["APPLICATIONINSIGHTS_CONNECTION_STRING"];
            config.ConnectionString = "InstrumentationKey=5f3e190f-999a-46f6-aea1-ddc46b8a9de9;IngestionEndpoint=https://westus2-4.in.applicationinsights.azure.com/;LiveEndpoint=https://westus2.livediagnostics.monitor.azure.com";
            // Same connection string with the ApplicationId included:
            // "InstrumentationKey=5f3e190f-999a-46f6-aea1-ddc46b8a9de9;IngestionEndpoint=https://westus2-4.in.applicationinsights.azure.com/;LiveEndpoint=https://westus2.livediagnostics.monitor.azure.com;ApplicationId=528f3612-f614-4768-a707-1aeb6d61c551"
        });
    })
    .Build();

host.Run();

public class MyFunctions
{
    private readonly ILogger<MyFunctions> _logger;

    public MyFunctions(ILogger<MyFunctions> logger)
    {
        _logger = logger;
    }

    // NCRONTAB schedule format: {second} {minute} {hour} {day} {month} {day-of-week};
    // "0 */1 * * * *" fires at second 0 of every minute.
    public void ProcessTimer([TimerTrigger("0 */1 * * * *")] TimerInfo timerInfo)
    {
        _logger.LogInformation($"WebJob executed at: {DateTime.Now}");
    }
}
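
To run the job locally, the Timers extension typically also needs a storage account for its schedule monitor, and the Application Insights connection string is better kept in configuration than hardcoded. A minimal appsettings.json sketch, assuming the WebJobs host picks it up from the output folder (setting names are the conventional ones; values are placeholders):

{
  "AzureWebJobsStorage": "<storage-account-connection-string>",
  "APPLICATIONINSIGHTS_CONNECTION_STRING": "<application-insights-connection-string>"
}

Set the file to "Copy if newer" so it lands next to the compiled WebJob.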

Output:



Wednesday, June 18, 2025

Create a generative AI chat app


https://microsoftlearning.github.io/mslearn-ai-studio/Instructions/02a-AI-foundry-sdk.html

1. Go to https://ai.azure.com/
2. Create a new Project and deploy 'gpt-4o' model
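
Step 2 yields the two values the code below relies on: the project endpoint and the model deployment name. The samples here hardcode them, but they could equally be kept in configuration; a minimal .env sketch for the Python version (variable names are illustrative only and are not read by the code as written):

PROJECT_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>"
MODEL_DEPLOYMENT="<your-gpt-4o-deployment-name>"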

C# Code:
using System;
using Azure;
using System.IO;
using System.Text;
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;
using Azure.Identity;
using Azure.AI.Projects;
using Azure.AI.Inference;

namespace chat_app
{
    class Program
    {
        static void Main(string[] args)
        {
            Console.Clear();
            try
            {
                IConfigurationBuilder builder =
                    new ConfigurationBuilder().AddJsonFile("appsettings.json");
                IConfigurationRoot configuration = builder.Build();
                string project_connection = "https://sree-2937-resource.services.ai.azure.com/api/projects/sree-2937";
                string model_deployment = "my-gpt-4o-2";

                DefaultAzureCredentialOptions options = new()
                {
                    ExcludeEnvironmentCredential = true,
                    ExcludeManagedIdentityCredential = true
                };
                var projectClient = new AIProjectClient(
                     new Uri(project_connection),
                     new DefaultAzureCredential(options));

                ChatCompletionsClient chat = projectClient.GetChatCompletionsClient();

                var prompt = new List<ChatRequestMessage>()
                {
                    new ChatRequestSystemMessage("You are a helpful AI assistant that answers questions.")
                };

                string input_text = "";
                while (input_text.ToLower() != "quit")
                {
                    Console.WriteLine("Enter the prompt (or type 'quit' to exit):");
                    input_text = Console.ReadLine();
                    if (input_text.ToLower() != "quit")
                    {
                        prompt.Add(new ChatRequestUserMessage(input_text));
                        var requestOptions = new ChatCompletionsOptions()
                        {
                            Model = model_deployment,
                            Messages = prompt
                        };

                        Response<ChatCompletions> response =
                            chat.Complete(requestOptions);
                        var completion = response.Value.Content;
                        Console.WriteLine(completion);
                        prompt.Add(new ChatRequestAssistantMessage(completion));
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
    }
}



Output:
1.  dotnet add package Azure.Identity
     dotnet add package Azure.AI.Projects --version 1.0.0-beta.9
     dotnet add package Azure.AI.Inference --version 1.0.0-beta.5

2. az login
3. dotnet run
4. 


Python Code:
import os
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient
from azure.ai.inference.models import SystemMessage, UserMessage, AssistantMessage

def main():

    os.system('cls' if os.name=='nt' else 'clear')
       
    try:
   
        load_dotenv()
        project_connection = "https://sree-2937-resource.services.ai.azure.com/api/projects/sree-2937"
        model_deployment = "my-gpt-4o-2"
       
        projectClient = AIProjectClient(            
         credential=DefaultAzureCredential(
             exclude_environment_credential=True,
             exclude_managed_identity_credential=True
         ),
         endpoint=project_connection,
        )
       
        chat = projectClient.inference.get_chat_completions_client()

        prompt=[
         SystemMessage("You are a helpful AI assistant that answers questions.")
        ]

        while True:
            input_text = input("Enter the prompt (or type 'quit' to exit): ")
            if input_text.lower() == "quit":
                break
            if len(input_text) == 0:
                print("Please enter a prompt.")
                continue
           
            prompt.append(UserMessage(input_text))
            response = chat.complete(
                model=model_deployment,
                messages=prompt)
            completion = response.choices[0].message.content
            print(completion)
            prompt.append(AssistantMessage(completion))

    except Exception as ex:
        print(ex)

if __name__ == '__main__':
    main()

Output:
1.  python -m venv labenv
    ./labenv/bin/Activate.ps1
    pip install python-dotenv azure-identity azure-ai-projects azure-ai-inference

2. az login
3. python chat-app.py
4. 
