Keep our byte-writing within bounds.

Now that we're writing both 16-bit and 32-bit integers, it's starting to
matter a little more how we slam even scalars into memory. This is maybe
not the fastest way to accomplish it, and I'm not crazy about the way
GLType works in general, but it does have the virtues of clarity and
expediency.
Par Winzell 2018-03-27 11:26:50 -07:00
parent 93d1385b1e
commit f6ce7e345d
1 changed file with 13 additions and 1 deletion

@@ -65,7 +65,19 @@ struct GLType {
     unsigned int byteStride() const { return componentType.size * count; }
 
     void write(uint8_t *buf, const float scalar) const { *((float *) buf) = scalar; }
-    void write(uint8_t *buf, const uint32_t scalar) const { *((uint32_t *) buf) = scalar; }
+    void write(uint8_t *buf, const uint32_t scalar) const {
+        switch(componentType.size) {
+            case 1:
+                *buf = (uint8_t)scalar;
+                break;
+            case 2:
+                *((uint16_t *) buf) = (uint16_t)scalar;
+                break;
+            case 4:
+                *((uint32_t *) buf) = scalar;
+                break;
+        }
+    }
 
     template<class T, int d>
     void write(uint8_t *buf, const mathfu::Vector<T, d> &vector) const {
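
The casts in the diff store through uint16_t* / uint32_t* pointers, which assumes the destination offset happens to be suitably aligned. As a point of comparison only (this is not part of the commit, and ScalarWriter / componentSize below are hypothetical stand-ins for GLType and componentType.size), here is a minimal sketch of the same size-dispatched write done with std::memcpy, which is well-defined for any byte offset:

// Sketch only: a standalone stand-in for the size-dispatched scalar write above,
// using std::memcpy so the store is well-defined even when buf is not aligned
// for uint16_t/uint32_t. ScalarWriter and componentSize are hypothetical names,
// not FBX2glTF's; they mirror GLType's componentType.size field.
#include <cstdint>
#include <cstdio>
#include <cstring>

struct ScalarWriter {
    unsigned int componentSize;  // 1, 2, or 4 bytes, like componentType.size

    void write(uint8_t *buf, uint32_t scalar) const {
        switch (componentSize) {
            case 1: {
                const uint8_t v = static_cast<uint8_t>(scalar);
                std::memcpy(buf, &v, sizeof(v));
                break;
            }
            case 2: {
                const uint16_t v = static_cast<uint16_t>(scalar);
                std::memcpy(buf, &v, sizeof(v));
                break;
            }
            case 4:
                std::memcpy(buf, &scalar, sizeof(scalar));
                break;
        }
    }
};

int main() {
    uint8_t buffer[16] = {};

    // Write a 16-bit value at offset 1 and a 32-bit value at offset 3;
    // neither offset is naturally aligned, and memcpy handles both.
    ScalarWriter{2}.write(buffer + 1, 0xBEEF);
    ScalarWriter{4}.write(buffer + 3, 0xDEADBEEFu);

    std::printf("%02x %02x\n", buffer[1], buffer[2]);  // "ef be" on little-endian hosts
    return 0;
}

Modern compilers typically lower these fixed-size memcpy calls to single store instructions, so the sketch trades little or no performance for the stricter guarantees.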