Based on our last project, we are going to use all that we have learned to create a mini game where the NPC is managed by ML-Agents.
This project works without a conda environment, since we will use a previously created file containing the result of a learning process.
Ml-Agents script:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;
using Unity.MLAgents;
using Unity.MLAgents.Sensors;
/// <summary>
/// NPC agent driven by ML-Agents: it learns (or uses a pre-trained model) to
/// move toward a target. Arrow keys drive it through <see cref="Heuristic"/>
/// when no model is attached.
/// </summary>
public class HumanoidML : Agent
{
    [Header("Velocidad")]
    [Range(0f, 5f)]
    public float _speed;

    [Header("Velocidad de giro")]
    [Range(50f, 300f)]
    public float _turnSpeed;

    // When false, MaxStep is zeroed in Initialize so episodes never end
    // automatically (inference mode with a pre-trained model).
    public bool _training = true;

    private Rigidbody _rb;

    // Target the agent is rewarded for reaching; assigned in the Inspector.
    [SerializeField]
    private Transform _target;

    private Animator _anim;

    // Position on the previous frame; used in Update to estimate speed.
    private Vector3 _previous;

    // Optional UI counter; rewards are only tallied on screen when assigned.
    public Text _contadorText = null;
    public float _puntos = 0;

    /// <summary>Caches components and configures the episode step limit.</summary>
    public override void Initialize()
    {
        _rb = GetComponent<Rigidbody>();
        _anim = GetComponent<Animator>();
        _previous = transform.position;
        // MaxStep is part of the Agent base class; 0 disables the per-episode
        // step limit so the agent runs indefinitely outside of training.
        if (!_training) MaxStep = 0;
    }

    /// <summary>Resets physics state and respawns the agent at a free spot.</summary>
    public override void OnEpisodeBegin()
    {
        _rb.velocity = Vector3.zero;
        _rb.angularVelocity = Vector3.zero;
        MoverPosicionInicial();
        _previous = transform.position;
    }

    /// <summary>
    /// Applies the policy's actions.
    /// vectorAction[0] = forward amount; vectorAction[1] = 0 none, 1 left, 2 right.
    /// </summary>
    public override void OnActionReceived(float[] vectorAction)
    {
        float lForward = vectorAction[0];
        float lTurn = 0f;
        if (vectorAction[1] == 1)
        {
            lTurn = -1f;
        }
        else if (vectorAction[1] == 2)
        {
            lTurn = 1f;
        }
        _rb.MovePosition(transform.position
            + transform.forward * lForward * _speed * Time.deltaTime);
        transform.Rotate(transform.up * lTurn * _turnSpeed * Time.deltaTime);
    }

    /// <summary>Feeds the measured speed to the animator each frame.</summary>
    public void Update()
    {
        // Guard against a zero deltaTime (paused game / first frame), which
        // would otherwise send NaN or Infinity to the animator parameter.
        if (Time.deltaTime > 0f)
        {
            float velocity = (transform.position - _previous).magnitude / Time.deltaTime;
            _anim.SetFloat("multiplicador", velocity);
        }
        _previous = transform.position;
    }

    /// <summary>
    /// Manual control used when no model is attached: Up arrow moves forward,
    /// Left/Right arrows produce the same discrete turn codes the policy emits.
    /// </summary>
    public override void Heuristic(float[] actionsOut)
    {
        float lForward = 0f;
        float lTurn = 0f;
        if (Input.GetKey(KeyCode.UpArrow))
        {
            lForward = 1f;
        }
        if (Input.GetKey(KeyCode.LeftArrow))
        {
            lTurn = 1f;
        }
        else if (Input.GetKey(KeyCode.RightArrow))
        {
            lTurn = 2f;
        }
        // Put the actions into the output array (same layout as OnActionReceived).
        actionsOut[0] = lForward;
        actionsOut[1] = lTurn;
    }

    /// <summary>Collects 7 observations: distance (1) + direction (3) + forward (3).</summary>
    public override void CollectObservations(VectorSensor sensor)
    {
        // Distance to the target.
        sensor.AddObservation(
            Vector3.Distance(_target.transform.position, transform.position));
        // Normalized direction to the target (3 values).
        sensor.AddObservation(
            (_target.transform.position - transform.position).normalized);
        // Character's forward vector (3 values).
        sensor.AddObservation(
            transform.forward);
    }

    // Rewards contact with the target and penalizes touching the borders.
    // (Removed a redundant `if (true)` wrapper from the original.)
    private void OnTriggerStay(Collider other)
    {
        if (other.CompareTag("target"))
        {
            DaPremio(0.5f);
        }
        if (other.CompareTag("borders"))
        {
            DaPremio(-0.05f);
        }
    }

    /// <summary>Adds a reward and, if a UI counter is wired up, shows the running total.</summary>
    private void DaPremio(float premio)
    {
        AddReward(premio);
        if (_contadorText != null)
        {
            _puntos += premio;
            _contadorText.text = _puntos.ToString();
        }
    }

    /// <summary>
    /// Places the agent at a random collider-free position around its parent.
    /// BUG FIX: the original loop condition used `||`, which ignored the
    /// attempt limit (infinite loop if no free spot exists) and kept
    /// re-randomizing the position after one was already found; `&&` is the
    /// intended logic: stop as soon as a spot is found or attempts run out.
    /// </summary>
    private void MoverPosicionInicial()
    {
        bool posicionEncontrada = false;
        int intentos = 100;
        while (!posicionEncontrada && intentos >= 0)
        {
            intentos--;
            Vector3 posicionPotencial = new Vector3(
                transform.parent.position.x + UnityEngine.Random.Range(-3f, 3f),
                0.555f, // assumed ground height for this humanoid — TODO confirm
                transform.parent.position.z + UnityEngine.Random.Range(-3f, 3f));
            // Accept the spot only if nothing overlaps a 0.5-unit sphere there.
            Collider[] colliders = Physics.OverlapSphere(posicionPotencial, 0.5f);
            if (colliders.Length == 0)
            {
                transform.position = posicionPotencial;
                posicionEncontrada = true;
            }
        }
    }
}
As you can see, we have many possibilities when it comes to NPC creation. We can use different configuration files to create ML-Agents with different behaviours: some more trained, some less trained, or some trained to reach the same target in a different way.
We have seen just the basics of ML-Agents, but in a short time we have built a solid knowledge base from which to start exploring the world of Artificial Intelligence using Machine Learning in Unity to improve our games.