https://microsoftlearning.github.io/mslearn-ai-studio/Instructions/02a-AI-foundry-sdk.html
1. Go to https://ai.azure.com/
2. Create a new project and deploy a gpt-4o model
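Both samples below connect using the project endpoint (shown on the project's Overview page in the Azure AI Foundry portal) and the name you gave the model deployment. The endpoint generally has the form:

https://<resource-name>.services.ai.azure.com/api/projects/<project-name>

The code below hard-codes one such endpoint and a deployment named my-gpt-4o-2; replace these with your own values.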
C# Code:
using System;
using Azure;
using System.IO;
using System.Text;
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;
using Azure.Identity;
using Azure.AI.Projects;
using Azure.AI.Inference;

namespace chat_app
{
    class Program
    {
        static void Main(string[] args)
        {
            Console.Clear();
            try
            {
                // Load appsettings.json (the endpoint and deployment name below are
                // hard-coded for this walkthrough, but could be read from this file instead)
                IConfigurationBuilder builder =
                    new ConfigurationBuilder().AddJsonFile("appsettings.json");
                IConfigurationRoot configuration = builder.Build();

                // Azure AI Foundry project endpoint and model deployment name
                string project_connection = "https://sree-2937-resource.services.ai.azure.com/api/projects/sree-2937";
                string model_deployment = "my-gpt-4o-2";

                // Authenticate with DefaultAzureCredential (relies on 'az login')
                DefaultAzureCredentialOptions options = new()
                {
                    ExcludeEnvironmentCredential = true,
                    ExcludeManagedIdentityCredential = true
                };

                var projectClient = new AIProjectClient(
                    new Uri(project_connection),
                    new DefaultAzureCredential(options));

                // Get a chat completions client from the project
                ChatCompletionsClient chat = projectClient.GetChatCompletionsClient();

                // Start the conversation history with a system message
                var prompt = new List<ChatRequestMessage>()
                {
                    new ChatRequestSystemMessage("You are a helpful AI assistant that answers questions.")
                };

                string input_text = "";
                while (input_text.ToLower() != "quit")
                {
                    Console.WriteLine("Enter the prompt (or type 'quit' to exit):");
                    input_text = Console.ReadLine();
                    if (input_text.ToLower() != "quit")
                    {
                        // Add the user message, call the model, and keep the reply in the history
                        prompt.Add(new ChatRequestUserMessage(input_text));
                        var requestOptions = new ChatCompletionsOptions()
                        {
                            Model = model_deployment,
                            Messages = prompt
                        };
                        Response<ChatCompletions> response = chat.Complete(requestOptions);
                        var completion = response.Value.Content;
                        Console.WriteLine(completion);
                        prompt.Add(new ChatRequestAssistantMessage(completion));
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
    }
}
To build and run (install the required packages, sign in with the Azure CLI, then run the app):
1. dotnet add package Azure.Identity
   dotnet add package Azure.AI.Projects --version 1.0.0-beta.9
   dotnet add package Azure.AI.Inference --version 1.0.0-beta.5
2. az login
3. dotnet run
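The program already loads appsettings.json, although the listing above hard-codes the endpoint and deployment name. If you prefer to keep them out of the source, a minimal appsettings.json could look like the sketch below; the key names ProjectEndpoint and ModelDeployment are illustrative choices, not names required by the SDK, and the code would then read them with configuration["ProjectEndpoint"] and configuration["ModelDeployment"].

{
  "ProjectEndpoint": "https://<resource-name>.services.ai.azure.com/api/projects/<project-name>",
  "ModelDeployment": "my-gpt-4o-2"
}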
Python Code:
import os
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient
from azure.ai.inference.models import SystemMessage, UserMessage, AssistantMessage

def main():
    # Clear the console
    os.system('cls' if os.name == 'nt' else 'clear')
    try:
        # Load environment variables from .env (the values below are hard-coded for this walkthrough)
        load_dotenv()
        project_connection = "https://sree-2937-resource.services.ai.azure.com/api/projects/sree-2937"
        model_deployment = "my-gpt-4o-2"

        # Connect to the Azure AI Foundry project (authentication relies on 'az login')
        projectClient = AIProjectClient(
            credential=DefaultAzureCredential(
                exclude_environment_credential=True,
                exclude_managed_identity_credential=True
            ),
            endpoint=project_connection,
        )

        # Get a chat completions client from the project
        chat = projectClient.inference.get_chat_completions_client()

        # Start the conversation history with a system message
        prompt = [
            SystemMessage("You are a helpful AI assistant that answers questions.")
        ]

        while True:
            input_text = input("Enter the prompt (or type 'quit' to exit): ")
            if input_text.lower() == "quit":
                break
            if len(input_text) == 0:
                print("Please enter a prompt.")
                continue

            # Add the user message, call the model, and keep the reply in the history
            prompt.append(UserMessage(input_text))
            response = chat.complete(
                model=model_deployment,
                messages=prompt)
            completion = response.choices[0].message.content
            print(completion)
            prompt.append(AssistantMessage(completion))

    except Exception as ex:
        print(ex)

if __name__ == '__main__':
    main()
To run (create and activate a virtual environment, install the required packages, sign in with the Azure CLI, then run the script):
1. python -m venv labenv
   ./labenv/bin/Activate.ps1
   pip install python-dotenv azure-identity azure-ai-projects azure-ai-inference
2. az login
3. python chat-app.py
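The script calls load_dotenv() but never actually reads anything from the environment, because the endpoint and deployment name are hard-coded. A minimal sketch of how they could be moved into a .env file is shown below; the variable names PROJECT_ENDPOINT and MODEL_DEPLOYMENT are assumptions for illustration, not names required by the SDK.

# .env (placed next to chat-app.py)
PROJECT_ENDPOINT=https://<resource-name>.services.ai.azure.com/api/projects/<project-name>
MODEL_DEPLOYMENT=my-gpt-4o-2

# chat-app.py would then read the values instead of hard-coding them:
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory
project_connection = os.getenv("PROJECT_ENDPOINT")  # assumed variable name
model_deployment = os.getenv("MODEL_DEPLOYMENT")    # assumed variable name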