libion: make tests work with Ion from common kernel.

The tests do not care about the legacy ION interface and are intended
to concentrate on kernels 4.14+ (new API). Running these tests
successfully during changes in ION will keep us honest with the
upcoming heap-as-module changes in ION.

Test: ./ion-unit-tests on cuttlefish.
Bug: 133508579

Change-Id: I2a31d09d09deb5be9ae84c4981cdf4b3c2cceb4c
Signed-off-by: Sandeep Patil <sspatil@google.com>
Sandeep Patil, 2019-07-12 17:34:09 -07:00
parent 89ea28116a, commit f7f03c9e32
11 changed files with 411 additions and 1120 deletions
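
For orientation, here is the flow the reworked tests follow with the post-4.12 ION API, as a minimal sketch built only from the libion calls visible in the diffs below (error handling trimmed; illustration, not part of the commit):

#include <sys/mman.h>
#include <unistd.h>
#include <vector>
#include <ion/ion.h>
#include "ion_4.12.h"  // for struct ion_heap_data (added by this change)

// Sketch: enumerate the heaps once, then allocate from each heap by id.
int exercise_all_heaps() {
    int ionfd = ion_open();
    if (ionfd < 0) return -1;

    int heap_count = 0;
    if (ion_query_heap_cnt(ionfd, &heap_count) || heap_count <= 0) return -1;

    std::vector<struct ion_heap_data> heaps(heap_count);
    if (ion_query_get_heaps(ionfd, heap_count, heaps.data())) return -1;

    for (const auto& heap : heaps) {
        int buf_fd = -1;
        // Heaps are addressed by a mask bit built from heap_id; there is
        // no legacy ion_user_handle_t anywhere in this flow.
        if (ion_alloc_fd(ionfd, 4096, 0, (1 << heap.heap_id), 0, &buf_fd)) continue;
        void* ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
        if (ptr != MAP_FAILED) munmap(ptr, 4096);
        close(buf_fd);  // closing the dma-buf fd is what frees the buffer
    }
    return ion_close(ionfd);
}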

libion/tests/Android.bp

@@ -18,18 +18,15 @@ cc_test {
name: "ion-unit-tests",
cflags: [
"-g",
"-Wall",
"-Werror",
"-Wno-missing-field-initializers",
],
shared_libs: ["libion"],
srcs: [
"ion_test_fixture.cpp",
"allocate_test.cpp",
"formerly_valid_handle_test.cpp",
"invalid_values_test.cpp",
"map_test.cpp",
"device_test.cpp",
"exit_test.cpp",
"heap_query.cpp",
"invalid_values_test.cpp",
"ion_test_fixture.cpp",
"map_test.cpp",
],
}

libion/tests/allocate_test.cpp

@@ -14,95 +14,106 @@
* limitations under the License.
*/
#include <memory>
#include <sys/mman.h>
#include <memory>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class Allocate : public IonAllHeapsTest {
};
class Allocate : public IonTest {};
TEST_F(Allocate, Allocate)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Allocate, Allocate) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
ion_user_handle_t handle = 0;
ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
ASSERT_TRUE(handle != 0);
ASSERT_EQ(0, ion_free(m_ionFd, handle));
int fd;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
TEST_F(Allocate, AllocateCached)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Allocate, AllocateCached) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
ion_user_handle_t handle = 0;
ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED, &handle));
ASSERT_TRUE(handle != 0);
ASSERT_EQ(0, ion_free(m_ionFd, handle));
int fd;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), ION_FLAG_CACHED, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
TEST_F(Allocate, AllocateCachedNeedsSync)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Allocate, AllocateCachedNeedsSync) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
ion_user_handle_t handle = 0;
ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED_NEEDS_SYNC, &handle));
ASSERT_TRUE(handle != 0);
ASSERT_EQ(0, ion_free(m_ionFd, handle));
int fd;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED_NEEDS_SYNC, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
TEST_F(Allocate, RepeatedAllocate)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Allocate, RepeatedAllocate) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
ion_user_handle_t handle = 0;
int fd;
for (unsigned int i = 0; i < 1024; i++) {
SCOPED_TRACE(::testing::Message() << "iteration " << i);
ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
ASSERT_TRUE(handle != 0);
ASSERT_EQ(0, ion_free(m_ionFd, handle));
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
}
TEST_F(Allocate, Zeroed)
{
TEST_F(Allocate, Large) {
for (const auto& heap : ion_heaps) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
int fd;
ASSERT_EQ(-ENOMEM,
ion_alloc_fd(ionfd, 3UL * 1024 * 1024 * 1024, 0, (1 << heap.heap_id), 0, &fd));
}
}
// Make sure all heaps always return zeroed pages
TEST_F(Allocate, Zeroed) {
auto zeroes_ptr = std::make_unique<char[]>(4096);
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
for (const auto& heap : ion_heaps) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
int fds[16];
for (unsigned int i = 0; i < 16; i++) {
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, 0, &map_fd));
ASSERT_EQ(0, ion_alloc_fd(ionfd, 4096, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr = NULL;
void* ptr = NULL;
ptr = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
@@ -116,13 +127,13 @@ TEST_F(Allocate, Zeroed)
ASSERT_EQ(0, close(fds[i]));
}
int newIonFd = ion_open();
int new_ionfd = ion_open();
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(newIonFd, 4096, 0, heapMask, 0, &map_fd));
ASSERT_EQ(0, ion_alloc_fd(new_ionfd, 4096, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr = NULL;
void* ptr = NULL;
ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
@@ -130,14 +141,6 @@ TEST_F(Allocate, Zeroed)
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Allocate, Large)
{
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
ion_user_handle_t handle = 0;
ASSERT_EQ(-ENOMEM, ion_alloc(m_ionFd, 3UL*1024*1024*1024, 0, heapMask, 0, &handle));
ASSERT_EQ(0, ion_close(new_ionfd));
}
}

libion/tests/device_test.cpp (deleted)

@@ -1,546 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fcntl.h>
#include <memory>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <linux/ion_test.h>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
#define ALIGN(x,y) (((x) + ((y) - 1)) & ~((y) - 1))
class Device : public IonAllHeapsTest {
public:
virtual void SetUp();
virtual void TearDown();
int m_deviceFd;
void readDMA(int fd, void *buf, size_t size);
void writeDMA(int fd, void *buf, size_t size);
void readKernel(int fd, void *buf, size_t size);
void writeKernel(int fd, void *buf, size_t size);
void blowCache();
void dirtyCache(void *ptr, size_t size);
};
void Device::SetUp()
{
IonAllHeapsTest::SetUp();
m_deviceFd = open("/dev/ion-test", O_RDONLY);
ASSERT_GE(m_deviceFd, 0);
}
void Device::TearDown()
{
ASSERT_EQ(0, close(m_deviceFd));
IonAllHeapsTest::TearDown();
}
void Device::readDMA(int fd, void *buf, size_t size)
{
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
struct ion_test_rw_data ion_test_rw_data = {
.ptr = (uint64_t)buf,
.offset = 0,
.size = size,
.write = 0,
};
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}
void Device::writeDMA(int fd, void *buf, size_t size)
{
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
struct ion_test_rw_data ion_test_rw_data = {
.ptr = (uint64_t)buf,
.offset = 0,
.size = size,
.write = 1,
};
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_DMA_MAPPING, &ion_test_rw_data));
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}
void Device::readKernel(int fd, void *buf, size_t size)
{
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
struct ion_test_rw_data ion_test_rw_data = {
.ptr = (uint64_t)buf,
.offset = 0,
.size = size,
.write = 0,
};
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}
void Device::writeKernel(int fd, void *buf, size_t size)
{
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, fd));
struct ion_test_rw_data ion_test_rw_data = {
.ptr = (uint64_t)buf,
.offset = 0,
.size = size,
.write = 1,
};
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_KERNEL_MAPPING, &ion_test_rw_data));
ASSERT_EQ(0, ioctl(m_deviceFd, ION_IOC_TEST_SET_FD, -1));
}
void Device::blowCache()
{
const size_t bigger_than_cache = 8*1024*1024;
void *buf1 = malloc(bigger_than_cache);
void *buf2 = malloc(bigger_than_cache);
memset(buf1, 0xaa, bigger_than_cache);
memcpy(buf2, buf1, bigger_than_cache);
free(buf1);
free(buf2);
}
void Device::dirtyCache(void *ptr, size_t size)
{
/* try to dirty cache lines */
for (size_t i = size-1; i > 0; i--) {
((volatile char *)ptr)[i];
((char *)ptr)[i] = i;
}
}
TEST_F(Device, KernelReadCached)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
for (int i = 0; i < 4096; i++)
((char *)ptr)[i] = i;
((char*)buf)[4096] = 0x12;
readKernel(map_fd, buf, 4096);
ASSERT_EQ(((char*)buf)[4096], 0x12);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)buf)[i]);
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, KernelWriteCached)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (int i = 0; i < 4096; i++)
((char *)buf)[i] = i;
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
dirtyCache(ptr, 4096);
writeKernel(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, DMAReadCached)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
for (int i = 0; i < 4096; i++)
((char *)ptr)[i] = i;
readDMA(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)buf)[i]);
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, DMAWriteCached)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (int i = 0; i < 4096; i++)
((char *)buf)[i] = i;
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
dirtyCache(ptr, 4096);
writeDMA(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, KernelReadCachedNeedsSync)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
for (int i = 0; i < 4096; i++)
((char *)ptr)[i] = i;
((char*)buf)[4096] = 0x12;
readKernel(map_fd, buf, 4096);
ASSERT_EQ(((char*)buf)[4096], 0x12);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)buf)[i]);
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, KernelWriteCachedNeedsSync)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (int i = 0; i < 4096; i++)
((char *)buf)[i] = i;
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
dirtyCache(ptr, 4096);
writeKernel(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, DMAReadCachedNeedsSync)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
for (int i = 0; i < 4096; i++)
((char *)ptr)[i] = i;
ion_sync_fd(m_ionFd, map_fd);
readDMA(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)buf)[i]);
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, DMAWriteCachedNeedsSync)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (int i = 0; i < 4096; i++)
((char *)buf)[i] = i;
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
dirtyCache(ptr, 4096);
writeDMA(map_fd, buf, 4096);
ion_sync_fd(m_ionFd, map_fd);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, KernelRead)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = 0;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
for (int i = 0; i < 4096; i++)
((char *)ptr)[i] = i;
((char*)buf)[4096] = 0x12;
readKernel(map_fd, buf, 4096);
ASSERT_EQ(((char*)buf)[4096], 0x12);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)buf)[i]);
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, KernelWrite)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (int i = 0; i < 4096; i++)
((char *)buf)[i] = i;
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = 0;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
dirtyCache(ptr, 4096);
writeKernel(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, DMARead)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = 0;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
for (int i = 0; i < 4096; i++)
((char *)ptr)[i] = i;
readDMA(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)buf)[i]);
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, DMAWrite)
{
auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);
for (int i = 0; i < 4096; i++)
((char *)buf)[i] = i;
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = 0;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
dirtyCache(ptr, 4096);
writeDMA(map_fd, buf, 4096);
for (int i = 0; i < 4096; i++)
ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Device, IsCached)
{
auto buf_ptr = std::make_unique<char[]>(4096);
void *buf = buf_ptr.get();
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
dirtyCache(ptr, 4096);
readDMA(map_fd, buf, 4096);
bool same = true;
for (int i = 4096-16; i >= 0; i -= 16)
if (((char *)buf)[i] != i)
same = false;
ASSERT_FALSE(same);
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
}
}

libion/tests/exit_test.cpp

@@ -22,206 +22,206 @@
#include "ion_test_fixture.h"
class Exit : public IonAllHeapsTest {
};
class Exit : public IonTest {};
TEST_F(Exit, WithAlloc)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Exit, WithAllocFd) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
ion_user_handle_t handle = 0;
EXPECT_EXIT(
{
int handle_fd = -1;
ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
ASSERT_TRUE(handle != 0);
exit(0);
}, ::testing::ExitedWithCode(0), "");
ASSERT_EQ(0,
ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &handle_fd));
ASSERT_NE(-1, handle_fd);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithAllocFd)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
int handle_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &handle_fd));
ASSERT_NE(-1, handle_fd);
exit(0);
}, ::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithRepeatedAllocFd)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Exit, WithRepeatedAllocFd) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
for (unsigned int i = 0; i < 1024; i++) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
ASSERT_EXIT({
int handle_fd = -1;
ASSERT_EXIT(
{
int handle_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &handle_fd));
ASSERT_NE(-1, handle_fd);
exit(0);
}, ::testing::ExitedWithCode(0), "")
<< "failed on heap " << heapMask
<< " and size " << size
<< " on iteration " << i;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0,
&handle_fd));
ASSERT_NE(-1, handle_fd);
exit(0);
},
::testing::ExitedWithCode(0), "")
<< "failed on heap " << heap.name << ":" << heap.type << ":" << heap.heap_id
<< " and size " << size << " on iteration " << i;
}
}
}
}
TEST_F(Exit, WithMapping)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Exit, WithMapping) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
int map_fd = -1;
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &map_fd));
ASSERT_GE(map_fd, 0);
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
}, ::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithPartialMapping)
{
static const size_t allocationSizes[] = {64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
}, ::testing::ExitedWithCode(0), "");
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithMappingCached)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Exit, WithPartialMapping) {
static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
int map_fd = -1;
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED, &map_fd));
ASSERT_GE(map_fd, 0);
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
}, ::testing::ExitedWithCode(0), "");
}
}
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
}
TEST_F(Exit, WithPartialMappingCached)
{
static const size_t allocationSizes[] = {64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
}, ::testing::ExitedWithCode(0), "");
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithMappingNeedsSync)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Exit, WithMappingCached) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
int map_fd = -1;
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC, &map_fd));
ASSERT_GE(map_fd, 0);
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
}, ::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithPartialMappingNeedsSync)
{
static const size_t allocationSizes[] = {64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT({
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
}, ::testing::ExitedWithCode(0), "");
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithPartialMappingCached) {
static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithMappingNeedsSync) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC,
&map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithPartialMappingNeedsSync) {
static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC,
&map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}

libion/tests/formerly_valid_handle_test.cpp (deleted)

@@ -1,63 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/mman.h>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class FormerlyValidHandle : public IonTest {
public:
virtual void SetUp();
virtual void TearDown();
ion_user_handle_t m_handle;
};
void FormerlyValidHandle::SetUp()
{
IonTest::SetUp();
ASSERT_EQ(0, ion_alloc(m_ionFd, 4096, 0, 1/* ion_env->m_firstHeap */, 0, &m_handle));
ASSERT_TRUE(m_handle != 0);
ASSERT_EQ(0, ion_free(m_ionFd, m_handle));
}
void FormerlyValidHandle::TearDown()
{
m_handle = 0;
}
TEST_F(FormerlyValidHandle, free)
{
ASSERT_EQ(-EINVAL, ion_free(m_ionFd, m_handle));
}
TEST_F(FormerlyValidHandle, map)
{
int map_fd;
unsigned char *ptr;
ASSERT_EQ(-EINVAL, ion_map(m_ionFd, m_handle, 4096, PROT_READ, 0, 0, &ptr, &map_fd));
}
TEST_F(FormerlyValidHandle, share)
{
int share_fd;
ASSERT_EQ(-EINVAL, ion_share(m_ionFd, m_handle, &share_fd));
}

libion/tests/heap_query.cpp (new file)

@@ -0,0 +1,27 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "ion_test_fixture.h"
class HeapQuery : public IonTest {};
TEST_F(HeapQuery, AtleastOneHeap) {
ASSERT_GT(ion_heaps.size(), 0);
}
// TODO: Check if we expect some of the default
// heap types to be present on all devices.

libion/tests/invalid_values_test.cpp

@@ -16,171 +16,71 @@
#include <sys/mman.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class InvalidValues : public IonAllHeapsTest {
public:
virtual void SetUp();
virtual void TearDown();
ion_user_handle_t m_validHandle;
int m_validShareFd;
ion_user_handle_t const m_badHandle = -1;
};
class InvalidValues : public IonTest {};
void InvalidValues::SetUp()
{
IonAllHeapsTest::SetUp();
ASSERT_EQ(0, ion_alloc(m_ionFd, 4096, 0, m_firstHeap, 0, &m_validHandle))
<< m_ionFd << " " << m_firstHeap;
ASSERT_TRUE(m_validHandle != 0);
ASSERT_EQ(0, ion_share(m_ionFd, m_validHandle, &m_validShareFd));
}
void InvalidValues::TearDown()
{
ASSERT_EQ(0, ion_free(m_ionFd, m_validHandle));
ASSERT_EQ(0, close(m_validShareFd));
m_validHandle = 0;
IonAllHeapsTest::TearDown();
}
TEST_F(InvalidValues, ion_close)
{
TEST_F(InvalidValues, ion_close) {
EXPECT_EQ(-EBADF, ion_close(-1));
}
TEST_F(InvalidValues, ion_alloc)
{
ion_user_handle_t handle;
/* invalid ion_fd */
int ret = ion_alloc(0, 4096, 0, m_firstHeap, 0, &handle);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
/* invalid ion_fd */
EXPECT_EQ(-EBADF, ion_alloc(-1, 4096, 0, m_firstHeap, 0, &handle));
/* no heaps */
EXPECT_EQ(-ENODEV, ion_alloc(m_ionFd, 4096, 0, 0, 0, &handle));
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
/* zero size */
EXPECT_EQ(-EINVAL, ion_alloc(m_ionFd, 0, 0, heapMask, 0, &handle));
/* too large size */
EXPECT_EQ(-EINVAL, ion_alloc(m_ionFd, -1, 0, heapMask, 0, &handle));
/* bad alignment */
EXPECT_EQ(-EINVAL, ion_alloc(m_ionFd, 4096, -1, heapMask, 0, &handle));
/* NULL handle */
EXPECT_EQ(-EINVAL, ion_alloc(m_ionFd, 4096, 0, heapMask, 0, NULL));
}
}
TEST_F(InvalidValues, ion_alloc_fd)
{
TEST_F(InvalidValues, ion_alloc_fd) {
int fd;
/* invalid ion_fd */
int ret = ion_alloc_fd(0, 4096, 0, m_firstHeap, 0, &fd);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
/* invalid ion_fd */
EXPECT_EQ(-EBADF, ion_alloc_fd(-1, 4096, 0, m_firstHeap, 0, &fd));
/* no heaps */
EXPECT_EQ(-ENODEV, ion_alloc_fd(m_ionFd, 4096, 0, 0, 0, &fd));
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
/* zero size */
EXPECT_EQ(-EINVAL, ion_alloc_fd(m_ionFd, 0, 0, heapMask, 0, &fd));
/* too large size */
EXPECT_EQ(-EINVAL, ion_alloc_fd(m_ionFd, -1, 0, heapMask, 0, &fd));
/* bad alignment */
EXPECT_EQ(-EINVAL, ion_alloc_fd(m_ionFd, 4096, -1, heapMask, 0, &fd));
/* NULL handle */
EXPECT_EQ(-EINVAL, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, 0, NULL));
// no heaps
EXPECT_EQ(-ENODEV, ion_alloc_fd(ionfd, 4096, 0, 0, 0, &fd));
for (const auto& heap : ion_heaps) {
// invalid ion_fd
int ret = ion_alloc_fd(0, 4096, 0, (1 << heap.heap_id), 0, &fd);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
// invalid ion_fd
EXPECT_EQ(-EBADF, ion_alloc_fd(-1, 4096, 0, (1 << heap.heap_id), 0, &fd));
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
// zero size
EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, 0, 0, (1 << heap.heap_id), 0, &fd));
// too large size
EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, -1, 0, (1 << heap.heap_id), 0, &fd));
// bad alignment
// TODO: Current userspace and kernel code completely ignores alignment. So this
// test is going to fail. We need to completely remove alignment from the API.
// All memory by default is always page aligned. OR actually pass the alignment
// down into the kernel and make kernel respect the alignment.
// EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, 4096, -1, (1 << heap.heap_id), 0, &fd));
// NULL fd
EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, 4096, 0, (1 << heap.heap_id), 0, nullptr));
}
}
TEST_F(InvalidValues, ion_free)
{
/* invalid ion fd */
int ret = ion_free(0, m_validHandle);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
/* invalid ion fd */
EXPECT_EQ(-EBADF, ion_free(-1, m_validHandle));
/* zero handle */
EXPECT_EQ(-EINVAL, ion_free(m_ionFd, 0));
/* bad handle */
EXPECT_EQ(-EINVAL, ion_free(m_ionFd, m_badHandle));
TEST_F(InvalidValues, ion_query_heap_cnt) {
// NULL count
EXPECT_EQ(-EINVAL, ion_query_heap_cnt(ionfd, nullptr));
int heap_count;
// bad fd
EXPECT_EQ(-EBADF, ion_query_heap_cnt(-1, &heap_count));
}
TEST_F(InvalidValues, ion_map)
{
int map_fd;
unsigned char *ptr;
TEST_F(InvalidValues, ion_query_get_heaps) {
int heap_count;
ASSERT_EQ(0, ion_query_heap_cnt(ionfd, &heap_count));
ASSERT_GT(heap_count, 0);
/* invalid ion fd */
int ret = ion_map(0, m_validHandle, 4096, PROT_READ, 0, 0, &ptr, &map_fd);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
/* invalid ion fd */
EXPECT_EQ(-EBADF, ion_map(-1, m_validHandle, 4096, PROT_READ, 0, 0, &ptr, &map_fd));
/* zero handle */
EXPECT_EQ(-EINVAL, ion_map(m_ionFd, 0, 4096, PROT_READ, 0, 0, &ptr, &map_fd));
/* bad handle */
EXPECT_EQ(-EINVAL, ion_map(m_ionFd, m_badHandle, 4096, PROT_READ, 0, 0, &ptr, &map_fd));
/* zero length */
EXPECT_EQ(-EINVAL, ion_map(m_ionFd, m_validHandle, 0, PROT_READ, 0, 0, &ptr, &map_fd));
/* bad prot */
EXPECT_EQ(-EINVAL, ion_map(m_ionFd, m_validHandle, 4096, -1, 0, 0, &ptr, &map_fd));
/* bad offset */
EXPECT_EQ(-EINVAL, ion_map(m_ionFd, m_validHandle, 4096, PROT_READ, 0, -1, &ptr, &map_fd));
/* NULL ptr */
EXPECT_EQ(-EINVAL, ion_map(m_ionFd, m_validHandle, 4096, PROT_READ, 0, 0, NULL, &map_fd));
/* NULL map_fd */
EXPECT_EQ(-EINVAL, ion_map(m_ionFd, m_validHandle, 4096, PROT_READ, 0, 0, &ptr, NULL));
}
TEST_F(InvalidValues, ion_share)
{
int share_fd;
/* invalid ion fd */
int ret = ion_share(0, m_validHandle, &share_fd);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
/* invalid ion fd */
EXPECT_EQ(-EBADF, ion_share(-1, m_validHandle, &share_fd));
/* zero handle */
EXPECT_EQ(-EINVAL, ion_share(m_ionFd, 0, &share_fd));
/* bad handle */
EXPECT_EQ(-EINVAL, ion_share(m_ionFd, m_badHandle, &share_fd));
/* NULL share_fd */
EXPECT_EQ(-EINVAL, ion_share(m_ionFd, m_validHandle, NULL));
}
TEST_F(InvalidValues, ion_import)
{
ion_user_handle_t handle;
/* invalid ion fd */
int ret = ion_import(0, m_validShareFd, &handle);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
/* invalid ion fd */
EXPECT_EQ(-EBADF, ion_import(-1, m_validShareFd, &handle));
/* bad share_fd */
EXPECT_EQ(-EINVAL, ion_import(m_ionFd, 0, &handle));
/* invalid share_fd */
EXPECT_EQ(-EBADF, ion_import(m_ionFd, -1, &handle));
/* NULL handle */
EXPECT_EQ(-EINVAL, ion_import(m_ionFd, m_validShareFd, NULL));
}
TEST_F(InvalidValues, ion_sync_fd)
{
/* invalid ion fd */
int ret = ion_sync_fd(0, m_validShareFd);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
/* invalid ion fd */
EXPECT_EQ(-EBADF, ion_sync_fd(-1, m_validShareFd));
/* bad share_fd */
EXPECT_EQ(-EINVAL, ion_sync_fd(m_ionFd, 0));
/* invalid share_fd */
EXPECT_EQ(-EBADF, ion_sync_fd(m_ionFd, -1));
// nullptr buffers, still returns success but without
// the ion_heap_data.
EXPECT_EQ(0, ion_query_get_heaps(ionfd, heap_count, nullptr));
std::unique_ptr<struct ion_heap_data[]> heaps =
std::make_unique<struct ion_heap_data[]>(heap_count);
// bad fd
EXPECT_EQ(-EBADF, ion_query_get_heaps(-1, heap_count, heaps.get()));
// invalid heap data pointer
EXPECT_EQ(-EFAULT, ion_query_get_heaps(ionfd, heap_count, reinterpret_cast<void*>(0xdeadf00d)));
}

libion/tests/ion_4.12.h (new file, 50 lines)

@@ -0,0 +1,50 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef _UAPI_LINUX_ION_NEW_H
#define _UAPI_LINUX_ION_NEW_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
struct ion_new_allocation_data {
__u64 len;
__u32 heap_id_mask;
__u32 flags;
__u32 fd;
__u32 unused;
};
#define MAX_HEAP_NAME 32
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
__u32 reserved0;
__u32 reserved1;
__u32 reserved2;
};
struct ion_heap_query {
__u32 cnt;
__u32 reserved0;
__u64 heaps;
__u32 reserved1;
__u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
#define ION_IOC_NEW_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_new_allocation_data)
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, struct ion_heap_query)
#endif
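
The query helpers used throughout the tests (ion_query_heap_cnt, ion_query_get_heaps) sit on top of ION_IOC_HEAP_QUERY. Below is a sketch of the two-call convention this ioctl appears to support, inferred from the tests above (a call with heaps == 0 still succeeds and reports only the count); treat the details as assumptions rather than libion source:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "ion_4.12.h"

// Sketch (assumption): first call learns the heap count, second call
// fills the caller's ion_heap_data array.
int query_heaps(int ionfd, struct ion_heap_data* out, uint32_t max_cnt) {
    struct ion_heap_query query;
    memset(&query, 0, sizeof(query));

    // heaps == 0: kernel populates query.cnt only.
    if (ioctl(ionfd, ION_IOC_HEAP_QUERY, &query) < 0) return -1;
    if (out == nullptr) return (int)query.cnt;

    // Second call: hand the kernel our buffer as a __u64 user pointer.
    if (query.cnt > max_cnt) query.cnt = max_cnt;
    query.heaps = (uint64_t)(uintptr_t)out;
    if (ioctl(ionfd, ION_IOC_HEAP_QUERY, &query) < 0) return -1;
    return (int)query.cnt;
}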

libion/tests/ion_test_fixture.cpp

@@ -15,59 +15,26 @@
*/
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
IonTest::IonTest() : m_ionFd(-1)
{
}
IonTest::IonTest() : ionfd(-1), ion_heaps() {}
void IonTest::SetUp() {
m_ionFd = ion_open();
ASSERT_GE(m_ionFd, 0);
ionfd = ion_open();
ASSERT_GE(ionfd, 0);
int heap_count;
int ret = ion_query_heap_cnt(ionfd, &heap_count);
ASSERT_EQ(ret, 0);
ASSERT_GT(heap_count, 0);
ion_heaps.resize(heap_count, {});
ret = ion_query_get_heaps(ionfd, heap_count, ion_heaps.data());
ASSERT_EQ(ret, 0);
}
void IonTest::TearDown() {
ion_close(m_ionFd);
}
IonAllHeapsTest::IonAllHeapsTest() :
m_firstHeap(0),
m_lastHeap(0),
m_allHeaps()
{
}
void IonAllHeapsTest::SetUp() {
int fd = ion_open();
ASSERT_GE(fd, 0);
for (int i = 1; i != 0; i <<= 1) {
ion_user_handle_t handle = 0;
int ret;
ret = ion_alloc(fd, 4096, 0, i, 0, &handle);
if (ret == 0 && handle != 0) {
ion_free(fd, handle);
if (!m_firstHeap) {
m_firstHeap = i;
}
m_lastHeap = i;
m_allHeaps.push_back(i);
} else {
ASSERT_EQ(-ENODEV, ret);
}
}
ion_close(fd);
EXPECT_NE(0U, m_firstHeap);
EXPECT_NE(0U, m_lastHeap);
RecordProperty("Heaps", m_allHeaps.size());
IonTest::SetUp();
}
void IonAllHeapsTest::TearDown() {
IonTest::TearDown();
ion_close(ionfd);
}

libion/tests/ion_test_fixture.h

@@ -18,29 +18,19 @@
#define ION_TEST_FIXTURE_H_
#include <gtest/gtest.h>
#include <vector>
#include "ion_4.12.h"
using ::testing::Test;
class IonTest : public virtual Test {
public:
public:
IonTest();
virtual ~IonTest() {};
virtual void SetUp();
virtual void TearDown();
int m_ionFd;
};
class IonAllHeapsTest : public IonTest {
public:
IonAllHeapsTest();
virtual ~IonAllHeapsTest() {};
virtual ~IonTest(){};
virtual void SetUp();
virtual void TearDown();
unsigned int m_firstHeap;
unsigned int m_lastHeap;
std::vector<unsigned int> m_allHeaps;
int ionfd;
std::vector<struct ion_heap_data> ion_heaps;
};
#endif /* ION_TEST_FIXTURE_H_ */
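
To show how the reworked fixture is consumed, here is a hypothetical test written against it (illustration only; the class and test names are made up): IonTest::SetUp() opens the device into ionfd and fills ion_heaps, so a test body only has to iterate.

#include <unistd.h>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"

class Example : public IonTest {};

// Every heap the fixture discovered should satisfy a one-page allocation.
TEST_F(Example, OnePagePerHeap) {
    for (const auto& heap : ion_heaps) {
        SCOPED_TRACE(::testing::Message()
                     << "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
        int fd = -1;
        ASSERT_EQ(0, ion_alloc_fd(ionfd, 4096, 0, (1 << heap.heap_id), 0, &fd));
        ASSERT_GE(fd, 0);
        ASSERT_EQ(0, close(fd));
    }
}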

libion/tests/map_test.cpp

@@ -15,61 +15,30 @@
*/
#include <sys/mman.h>
#include <unistd.h>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class Map : public IonAllHeapsTest {
};
class Map : public IonTest {};
TEST_F(Map, MapHandle)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Map, MapFd) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message() << "size " << size);
ion_user_handle_t handle = 0;
ASSERT_EQ(0, ion_alloc(m_ionFd, size, 0, heapMask, 0, &handle));
ASSERT_TRUE(handle != 0);
int map_fd = -1;
unsigned char *ptr = NULL;
ASSERT_EQ(0, ion_map(m_ionFd, handle, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0, &ptr, &map_fd));
ASSERT_TRUE(ptr != NULL);
ASSERT_GE(map_fd, 0);
ASSERT_EQ(0, close(map_fd));
ASSERT_EQ(0, ion_free(m_ionFd, handle));
memset(ptr, 0xaa, size);
ASSERT_EQ(0, munmap(ptr, size));
}
}
}
TEST_F(Map, MapFd)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, 0, &map_fd));
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, close(map_fd));
memset(ptr, 0xaa, size);
@@ -79,53 +48,51 @@ TEST_F(Map, MapFd)
}
}
TEST_F(Map, MapOffset)
{
for (unsigned int heapMask : m_allHeaps) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
TEST_F(Map, MapOffset) {
for (const auto& heap : ion_heaps) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, PAGE_SIZE * 2, 0, heapMask, 0, &map_fd));
ASSERT_EQ(0, ion_alloc_fd(ionfd, getpagesize() * 2, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
unsigned char *ptr;
ptr = (unsigned char *)mmap(NULL, PAGE_SIZE * 2, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
unsigned char* ptr;
ptr = (unsigned char*)mmap(NULL, getpagesize() * 2, PROT_READ | PROT_WRITE, MAP_SHARED,
map_fd, 0);
ASSERT_TRUE(ptr != NULL);
memset(ptr, 0, PAGE_SIZE);
memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
memset(ptr, 0, getpagesize());
memset(ptr + getpagesize(), 0xaa, getpagesize());
ASSERT_EQ(0, munmap(ptr, PAGE_SIZE * 2));
ASSERT_EQ(0, munmap(ptr, getpagesize() * 2));
ptr = (unsigned char *)mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, PAGE_SIZE);
ptr = (unsigned char*)mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED, map_fd,
getpagesize());
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(ptr[0], 0xaa);
ASSERT_EQ(ptr[PAGE_SIZE - 1], 0xaa);
ASSERT_EQ(0, munmap(ptr, PAGE_SIZE));
ASSERT_EQ(ptr[getpagesize() - 1], 0xaa);
ASSERT_EQ(0, munmap(ptr, getpagesize()));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Map, MapCached)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Map, MapCached) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, flags, &map_fd));
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, close(map_fd));
memset(ptr, 0xaa, size);
@@ -135,23 +102,22 @@ TEST_F(Map, MapCached)
}
}
TEST_F(Map, MapCachedNeedsSync)
{
static const size_t allocationSizes[] = {4*1024, 64*1024, 1024*1024, 2*1024*1024};
for (unsigned int heapMask : m_allHeaps) {
TEST_F(Map, MapCachedNeedsSync) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
ASSERT_EQ(0, ion_alloc_fd(m_ionFd, size, 0, heapMask, flags, &map_fd));
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), flags, &map_fd));
ASSERT_GE(map_fd, 0);
void *ptr;
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, close(map_fd));
memset(ptr, 0xaa, size);