I can post the entire source file and show you how I'm using it:
use crate::vertex::Vertex;
use gl::types::*;
use std::mem;
use std::os::raw::c_void;
#[derive(Debug, Copy, Clone)]
pub enum BufferKind {
    Vertex,
    Index,
}

impl BufferKind {
    pub fn to_raw_enum(&self) -> GLenum {
        match self {
            BufferKind::Vertex => gl::ARRAY_BUFFER,
            BufferKind::Index => gl::ELEMENT_ARRAY_BUFFER,
        }
    }
}
#[derive(Debug, Copy, Clone)]
pub enum BufferMode {
    StaticDraw,
    StaticRead,
    StaticCopy,
    DynamicDraw,
    DynamicRead,
    DynamicCopy,
    StreamDraw,
    StreamRead,
    StreamCopy,
}

impl BufferMode {
    pub fn to_raw_enum(&self) -> GLenum {
        match self {
            BufferMode::StaticDraw => gl::STATIC_DRAW,
            BufferMode::StaticRead => gl::STATIC_READ,
            BufferMode::StaticCopy => gl::STATIC_COPY,
            BufferMode::DynamicDraw => gl::DYNAMIC_DRAW,
            BufferMode::DynamicRead => gl::DYNAMIC_READ,
            BufferMode::DynamicCopy => gl::DYNAMIC_COPY,
            BufferMode::StreamDraw => gl::STREAM_DRAW,
            BufferMode::StreamRead => gl::STREAM_READ,
            BufferMode::StreamCopy => gl::STREAM_COPY,
        }
    }
}
#[derive(Debug, Copy, Clone)]
pub enum AttributeKind {
    Byte,
    Short,
    Int,
    UnsignedByte,
    UnsignedShort,
    UnsignedInt,
    Half,
    Float,
    Double,
    Fixed,
}

impl AttributeKind {
    pub fn to_raw_enum(&self) -> GLenum {
        match self {
            AttributeKind::Byte => gl::BYTE,
            AttributeKind::Short => gl::SHORT,
            AttributeKind::Int => gl::INT,
            AttributeKind::UnsignedByte => gl::UNSIGNED_BYTE,
            AttributeKind::UnsignedShort => gl::UNSIGNED_SHORT,
            AttributeKind::UnsignedInt => gl::UNSIGNED_INT,
            AttributeKind::Half => gl::HALF_FLOAT,
            AttributeKind::Float => gl::FLOAT,
            AttributeKind::Double => gl::DOUBLE,
            AttributeKind::Fixed => gl::FIXED,
        }
    }

    pub fn size(&self) -> usize {
        match self {
            AttributeKind::Byte => mem::size_of::<GLbyte>(),
            AttributeKind::Short => mem::size_of::<GLshort>(),
            AttributeKind::Int => mem::size_of::<GLint>(),
            AttributeKind::UnsignedByte => mem::size_of::<GLubyte>(),
            AttributeKind::UnsignedShort => mem::size_of::<GLushort>(),
            AttributeKind::UnsignedInt => mem::size_of::<GLuint>(),
            AttributeKind::Half => mem::size_of::<GLhalf>(),
            AttributeKind::Float => mem::size_of::<GLfloat>(),
            AttributeKind::Double => mem::size_of::<GLdouble>(),
            AttributeKind::Fixed => mem::size_of::<GLfixed>(),
        }
    }
}
pub enum PrimitiveKind {
    Points,
    Triangles,
    TriangleFan,
    TriangleStrip,
}

impl PrimitiveKind {
    pub fn to_raw_enum(&self) -> GLenum {
        match self {
            PrimitiveKind::Points => gl::POINTS,
            PrimitiveKind::Triangles => gl::TRIANGLES,
            PrimitiveKind::TriangleFan => gl::TRIANGLE_FAN,
            PrimitiveKind::TriangleStrip => gl::TRIANGLE_STRIP,
        }
    }
}
pub struct VBO {
    mode: BufferMode,
    primitive_kind: PrimitiveKind,
    handle: GLuint,
    index_count: usize,
    vertex_count: usize,
}

impl VBO {
    pub fn make<T: Vertex>(mode: BufferMode, primitive_kind: PrimitiveKind, vertices: &Vec::<T>, indices: Option<&Vec::<u16>>) -> VBO {
        let mut index_count = 0;
        let handle = unsafe {
            let mut vao = 0;
            gl::GenVertexArrays(1, &mut vao);
            gl::BindVertexArray(vao);
            vao
        };

        VBO::build_vertex_buffer(mode, &vertices);

        if let Some(list) = indices {
            index_count = list.len();
            VBO::build_index_buffer(list);
        }

        unsafe { gl::BindVertexArray(0) };

        VBO {
            mode,
            primitive_kind,
            handle,
            index_count,
            vertex_count: vertices.len(),
        }
    }
    fn build_vertex_buffer<T: Vertex>(mode: BufferMode, vertices: &Vec::<T>) -> GLuint {
        let stride = mem::size_of::<T>() as GLsizei;
        let total_size = (vertices.len() * stride as usize) as GLsizeiptr;
        let root_ptr = &vertices[0] as *const T as *const c_void;

        unsafe {
            let mut vbo = 0;
            let mut offset = 0;

            gl::GenBuffers(1, &mut vbo);
            gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
            gl::BufferData(gl::ARRAY_BUFFER, total_size, root_ptr, mode.to_raw_enum());

            for (i, attr) in T::attrs().iter().enumerate() {
                let offset_ptr = offset as *const c_void;
                let normalized = match attr.0 {
                    false => gl::FALSE,
                    true => gl::TRUE,
                };

                gl::EnableVertexAttribArray(i as u32);
                gl::VertexAttribPointer(
                    i as GLuint,
                    attr.1 as GLint,
                    attr.2.to_raw_enum(),
                    normalized,
                    stride,
                    offset_ptr,
                );

                offset += attr.2.size() * attr.1;
            }

            vbo
        }
    }

    fn build_index_buffer(indices: &Vec::<u16>) -> GLuint {
        let total_size = (indices.len() * 2) as GLsizeiptr;
        let root_ptr = &indices[0] as *const u16 as *const c_void;

        unsafe {
            let mut ibo = 0;

            gl::GenBuffers(1, &mut ibo);
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo);
            gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, total_size, root_ptr, gl::STATIC_DRAW);

            ibo as GLuint
        }
    }
    fn write<T: Sized>(&self, kind: BufferKind, vertices: &Vec::<T>, offset: usize) {
        let size = mem::size_of::<T>() as isize;
        let offset = offset as isize * size;
        let total_size = vertices.len() as isize * size;
        let root_ptr = &vertices[0] as *const T as *const c_void;
        let raw_kind = kind.to_raw_enum();

        println!("offset: {} total_size: {}", offset, total_size);

        unsafe {
            gl::BindVertexArray(self.handle);
            gl::BindBuffer(raw_kind, self.handle);
            gl::BufferSubData(raw_kind, offset, total_size, root_ptr);
        };
    }

    pub fn mode(&self) -> BufferMode {
        self.mode
    }

    pub fn write_vertices<T: Vertex>(&self, vertices: &Vec::<T>, offset: usize) {
        self.write(BufferKind::Vertex, vertices, offset);
    }

    pub fn write_indices<T: Vertex>(&self, indices: &Vec::<u16>, offset: usize) {
        self.write(BufferKind::Index, indices, offset);
    }

    pub fn draw(&self) {
        let kind = self.primitive_kind.to_raw_enum();

        unsafe {
            gl::BindVertexArray(self.handle);

            if self.index_count > 0 {
                let root_ptr = 0 as *const u16 as *const c_void;
                gl::DrawElements(kind, self.index_count as i32, gl::UNSIGNED_SHORT, root_ptr);
            } else {
                gl::DrawArrays(kind, 0, self.vertex_count as i32);
            }

            gl::BindVertexArray(0);
        };
    }
}

impl Drop for VBO {
    fn drop(&mut self) {
        unsafe { gl::DeleteVertexArrays(1, &self.handle) };
        self.handle = 0;
    }
}
Here's how the VBO is being instantiated in this particular case:
let vbo = VBO::make(BufferMode::DynamicDraw, PrimitiveKind::Triangles, &vertices, Some(&indices));
The VBO was designed to take only vertex structs, so I can use interleaved arrays. I'm using the TextureVertex struct, which looks like this:
#[repr(C, packed)]
#[derive(Copy, Clone, Debug)]
pub struct TextureVertex {
    pub pos: Vector2,
    pub coord: Vector2,
}

impl TextureVertex {
    pub fn make(x: f32, y: f32, u: f32, v: f32) -> TextureVertex {
        TextureVertex {
            pos: Vector2::make(x, y),
            coord: Vector2::make(u, v),
        }
    }

    pub fn make_from_parts(pos: Vector2, coord: Vector2) -> TextureVertex {
        TextureVertex { pos, coord }
    }
}

impl Vertex for TextureVertex {
    fn attrs() -> Vec<(bool, usize, AttributeKind)> {
        vec![
            (false, 2, AttributeKind::Float),
            (false, 2, AttributeKind::Float),
        ]
    }

    fn new() -> TextureVertex {
        TextureVertex {
            pos: Vector2::new(),
            coord: Vector2::new(),
        }
    }
}
All vertex structs must implement the Vertex trait I created (it's similar to an interface or abstract class in other languages). It requires an attrs() method, defined at the type level (think of it as a static method), that returns the details of the vertex attribute layout.
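For reference, the trait itself looks roughly like this (a minimal sketch reconstructed from the impl above; the real definition lives in crate::vertex and may differ slightly):

pub trait Vertex {
    // one entry per attribute: (normalized, component count, component kind)
    fn attrs() -> Vec<(bool, usize, AttributeKind)>;
    // a default/zeroed instance
    fn new() -> Self;
}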
Everything renders correctly. Eventually, my tileset renderer will call the VBO's write_vertices() method, like so:
self.anim_vbo.write_vertices(&self.anim_verts, 0);
The self.anim_verts field is a vector containing the original vertex data that was used to fill the VBO initially. An animation function is called periodically; it updates the U coordinate of every vertex belonging to an animated tile and then calls write_vertices to push the changes into the VBO. Nothing appears to change onscreen when the VBO is updated. I've confirmed that the animation function is being called multiple times a second, and with the correct values, so the run loop itself looks fine.
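The animation step is essentially equivalent to this (a simplified, hypothetical sketch; animate_tiles and u_offset are placeholder names, and it assumes Vector2 exposes public x/y fields):

// Hypothetical sketch of the periodic animation update.
fn animate_tiles(anim_vbo: &VBO, anim_verts: &mut Vec<TextureVertex>, u_offset: f32) {
    // shift the U texture coordinate of every animated-tile vertex
    for vert in anim_verts.iter_mut() {
        vert.coord = Vector2::make(vert.coord.x + u_offset, vert.coord.y);
    }
    // push the updated vertex data back into the existing buffer
    anim_vbo.write_vertices(anim_verts, 0);
}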