Skip to content

Commit

Permalink
update ci to include windows (#99)
Browse files Browse the repository at this point in the history
* update ci to include windows

* update boost catch install

* add hdf5 from vcpkg

* update shell

* test vcpkg

* temporarily remove other os for testing

* test commands

* test commands

* use installed vcpkg

* update variable

* update command

* integrate vcpkg

* update presets and workflow

* update hdf5 install

* readd boost and catch

* update boost

* fix string length in spec

* fix timeseries statement

* fix formatting

* adjust chunk size

* remove unused variables

* update reader to use non-platform specific sleep

* fix old relative path

* add status returns to writeElectricalSeries and writeSpikeEventSeries

* link against bcrypt on windows

* fix warnings

* fix casting warnings

* fix bcrypt linking

* fix formatting

* readd macos and ubuntu testing

* fix formatting

* readd other os

* use conditional compilation for reader executable path

* update exe path

* fix casting

* inspect directory in workflow

* fix shadowed variables

* fix casting

* use full path for testing

* fix warnings

* update files for testing

* fix warnings

* inspect contents

* update workflow

* update path

* test file permissions

* Update testRecordingWorkflow.cpp due to error in merge

* Update testWorkflowExamples.cpp due to error in merge

* update io tests

* fix formatting

* update reader file locking

* fix formatting

* update test for windows case

* fix formatting workflow

* add warning to swmr docs

---------

Co-authored-by: Oliver Ruebel <oruebel@users.noreply.github.com>
  • Loading branch information
stephprince and oruebel authored Dec 18, 2024
1 parent 186f207 commit 264c893
Show file tree
Hide file tree
Showing 26 changed files with 217 additions and 95 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,4 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Lint
run: cmake -D FORMAT_COMMAND=clang-format-14 -P cmake/lint.cmake
run: cmake -D FORMAT_COMMAND=clang-format -P cmake/lint.cmake
9 changes: 8 additions & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [macos-latest, ubuntu-latest]
os: [macos-latest, ubuntu-latest, windows-latest]

runs-on: ${{ matrix.os }}

Expand All @@ -43,6 +43,13 @@ jobs:
- name: Install dependencies - macos
if: matrix.os == 'macos-latest'
run: brew install hdf5 boost catch2

- name: Install dependencies - windows
if: matrix.os == 'windows-latest'
run: |
cd "${VCPKG_INSTALLATION_ROOT}"
vcpkg install hdf5[cpp]:x64-windows boost-date-time:x64-windows boost-endian:x64-windows boost-uuid:x64-windows catch2:x64-windows
vcpkg integrate install
- name: Configure
shell: pwsh
Expand Down
3 changes: 3 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,9 @@ find_package(Boost REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})

target_link_libraries(aqnwb_aqnwb ${HDF5_CXX_LIBRARIES} ${Boost_LIBRARIES})
if (WIN32)
target_link_libraries(aqnwb_aqnwb bcrypt)
endif()

# ---- Install rules ----

Expand Down
3 changes: 3 additions & 0 deletions CMakePresets.json
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,9 @@
"inherits": ["flags-msvc", "ci-std"],
"generator": "Visual Studio 17 2022",
"architecture": "x64",
"cacheVariables": {
"CMAKE_TOOLCHAIN_FILE": "C:/vcpkg/scripts/buildsystems/vcpkg.cmake"
},
"hidden": true
},
{
Expand Down
5 changes: 5 additions & 0 deletions docs/pages/userdocs/hdf5io.dox
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,11 @@
* }
* \enddot
*
* \warning
* There are known issues using SWMR mode on Windows due to file locking by the reader processes. One workaround
* is to set the environment variable `HDF5_USE_FILE_LOCKING=FALSE` to prevent file access errors when using
* a writer process with other reader processes.
*
* \subsection hdf5io_swmr_features Why does AqNWB use SWMR mode?
*
* Using SWMR has several key advantages for data acquisition applications:
Expand Down
61 changes: 51 additions & 10 deletions resources/generate_spec_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
# load file
yaml = YAML(typ='safe')
with open(file) as f:
namespace = yaml.load(file)
namespace = yaml.load(f)

# get all the sources
for i, ns in enumerate(namespace['namespaces']):
Expand All @@ -22,8 +22,8 @@
header_file = Path(f"./src/spec/{ns['name'].replace('-', '_')}.hpp").resolve()
with open(header_file, 'w') as fo:
fo.write('#pragma once\n\n')
fo.write('#include <string>\n#include <string_view>\n#include <array>\n\n')
fo.write(f'namespace AQNWB::SPEC::{ns['name'].upper().replace('-', '_')}\n{{\n\n')
fo.write('#include <array>\n#include <string>\n#include <string_view>\n\n')
fo.write(f'namespace AQNWB::SPEC::{ns["name"].upper().replace("-", "_")}\n{{\n\n')
fo.write(f'const std::string version = "{ns["version"]}";\n\n')

# load and convert schema files
Expand All @@ -33,14 +33,55 @@
# load file
schema_file = file.parent / s['source']
with open(schema_file) as f:
spec = yaml.load(schema_file)
spec = yaml.load(f)

# convert to cpp string
print(f'Generating file {header_file} - {s["source"]}')
with open(header_file, 'a') as fo:
json_str = json.dumps(spec, separators=(',', ':'))
chunk_size = 16000 # Adjust the chunk size as needed
if len(json_str) > chunk_size:
# Split string into chunks if needed
chunks = [json_str[i:i + chunk_size] for i in range(0, len(json_str), chunk_size)]

var_name = s['source'].replace('.yaml', '').replace('.', '_')
fo.write(f'constexpr std::string_view {var_name} = R"delimiter(\n{json.dumps(spec, separators=(',', ':'))})delimiter";\n\n')
chunk_var_names = []
for i, chunk in enumerate(chunks):
chunk_var_name = f'{var_name}_part{i}'
with open(header_file, 'a') as fo:
fo.write(f'constexpr std::string_view {chunk_var_name} = R"delimiter({chunk})delimiter";\n')
chunk_var_names.append(chunk_var_name)

# Concatenate chunks at compile-time
with open(header_file, 'a') as fo:
fo.write(f'constexpr std::array<std::string_view, {len(chunks)}> {var_name}_parts = {{{", ".join(chunk_var_names)}}};\n')
fo.write(f'constexpr std::size_t {var_name}_total_length = []() {{\n')
fo.write(f' std::size_t length = 0;\n')
fo.write(f' for (const auto& part : {var_name}_parts) {{\n')
fo.write(f' length += part.size();\n')
fo.write(f' }}\n')
fo.write(f' return length;\n')
fo.write(f'}}();\n')
fo.write(f'constexpr auto {var_name}_concatenate = []() {{\n')
fo.write(f' std::array<char, {var_name}_total_length + 1> result{{}};\n')
fo.write(f' std::size_t pos = 0;\n')
fo.write(f' for (const auto& part : {var_name}_parts) {{\n')
fo.write(f' for (char c : part) {{\n')
fo.write(f' result[pos++] = c;\n')
fo.write(f' }}\n')
fo.write(f' }}\n')
fo.write(f' result[pos] = \'\\0\';\n')
fo.write(f' return result;\n')
fo.write(f'}}();\n')
fo.write(f'constexpr std::string_view {var_name}({var_name}_concatenate.data(), {var_name}_concatenate.size() - 1);\n\n')
var_names.append(var_name)
else:
with open(header_file, 'a') as fo:
var_name = s['source'].replace('.yaml', '').replace('.', '_')
fo.write(f'constexpr std::string_view {var_name} = R"delimiter(\n{json.dumps(spec, separators=(',', ':'))})delimiter";\n\n')
var_names.append(var_name)




# reformat schema sources for namespace file
schema = list()
Expand All @@ -49,12 +90,12 @@
s = {'source': s['source'].split('.yaml')[0]}
schema.append(s)
ns['schema'] = schema

# convert to cpp variables
ns_output = {'namespaces': [ns]}
with open(header_file, 'a') as fo:
fo.write(f'constexpr std::string_view namespaces = R"delimiter(\n{json.dumps(ns_output, separators=(',', ':'))})delimiter";\n\n')
fo.write(f'constexpr std::array<std::pair<std::string_view, std::string_view>, {len(var_names) + 1}> specVariables {{{{\n')
fo.write(''.join([f' {{"{name.replace('_', '.')}", {name}}},\n' for name in var_names]))
fo.write(f'constexpr std::array<std::pair<std::string_view, std::string_view>, {len(var_names) + 1}>\n specVariables {{{{\n')
fo.write(''.join([f' {{"{name.replace("_", ".")}", {name}}},\n' for name in var_names]))
fo.write(' {"namespace", namespaces}\n')
fo.write(f'}}}};\n}} // namespace AQNWB::SPEC::{ns['name'].upper().replace('-', '_')}\n')
fo.write(f'}}}};\n}} // namespace AQNWB::SPEC::{ns["name"].upper().replace("-", "_")}\n')
9 changes: 5 additions & 4 deletions src/Utils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -82,16 +82,17 @@ inline std::shared_ptr<BaseIO> createIO(const std::string& type,
*/
inline void convertFloatToInt16LE(const float* source,
void* dest,
int numSamples)
SizeType numSamples)
{
// TODO - several steps in this function may be unnecessary for our use
// case. Consider simplifying the intermediate cast to char and the
// final cast to uint16_t.
auto maxVal = static_cast<double>(0x7fff);
auto intData = static_cast<char*>(dest);

for (int i = 0; i < numSamples; ++i) {
auto clampedValue = std::clamp(maxVal * source[i], -maxVal, maxVal);
for (SizeType i = 0; i < numSamples; ++i) {
auto clampedValue =
std::clamp(maxVal * static_cast<double>(source[i]), -maxVal, maxVal);
auto intValue =
static_cast<uint16_t>(static_cast<int16_t>(std::round(clampedValue)));
intValue = boost::endian::native_to_little(intValue);
Expand All @@ -114,7 +115,7 @@ inline std::unique_ptr<int16_t[]> transformToInt16(SizeType numSamples,
std::unique_ptr<int16_t[]> intData = std::make_unique<int16_t[]>(numSamples);

// copy data and multiply by scaling factor
double multFactor = 1 / (32767.0f * conversion_factor);
float multFactor = 1.0f / (32767.0f * conversion_factor);
std::transform(data,
data + numSamples,
scaledData.get(),
Expand Down
34 changes: 20 additions & 14 deletions src/hdf5/HDF5IO.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -344,7 +344,14 @@ Status HDF5IO::createReferenceDataSet(
delete[] rdata;

herr_t dsetStatus = H5Dclose(dset);
if (checkStatus(dsetStatus) == Status::Failure) {
return Status::Failure;
}

herr_t spaceStatus = H5Sclose(space);
if (checkStatus(spaceStatus) == Status::Failure) {
return Status::Failure;
}

return checkStatus(writeStatus);
}
Expand Down Expand Up @@ -631,20 +638,19 @@ HDF5RecordingData::HDF5RecordingData(std::unique_ptr<H5::DataSet> data)
DataSpace dSpace = data->getSpace();
DSetCreatPropList prop = data->getCreatePlist();

int nDimensions = dSpace.getSimpleExtentNdims();
std::vector<hsize_t> dims(nDimensions), chunk(nDimensions);
this->nDimensions = static_cast<SizeType>(dSpace.getSimpleExtentNdims());
std::vector<hsize_t> dims(this->nDimensions), chunk(this->nDimensions);

nDimensions = dSpace.getSimpleExtentDims(
dims.data()); // TODO -redefine here or use original?
prop.getChunk(static_cast<int>(nDimensions), chunk.data());
this->nDimensions =
static_cast<SizeType>(dSpace.getSimpleExtentDims(dims.data()));
prop.getChunk(static_cast<int>(this->nDimensions), chunk.data());

this->size = std::vector<SizeType>(nDimensions);
for (int i = 0; i < nDimensions; ++i) {
this->size[i] = dims[i];
this->size = std::vector<SizeType>(this->nDimensions);
for (SizeType i = 0; i < this->nDimensions; ++i) {
this->size[i] = static_cast<SizeType>(dims[i]);
}
this->nDimensions = nDimensions;
this->position = std::vector<SizeType>(
nDimensions, 0); // Initialize position with 0 for each dimension
this->nDimensions, 0); // Initialize position with 0 for each dimension
m_dataset = std::make_unique<H5::DataSet>(*data);
}

Expand Down Expand Up @@ -672,7 +678,7 @@ Status HDF5RecordingData::writeDataBlock(

// Ensure that we have enough space to accommodate new data
std::vector<hsize_t> dSetDims(nDimensions), offset(nDimensions);
for (int i = 0; i < nDimensions; ++i) {
for (SizeType i = 0; i < nDimensions; ++i) {
offset[i] = static_cast<hsize_t>(positionOffset[i]);

if (dataShape[i] + offset[i] > size[i]) // TODO - do I need offset here
Expand All @@ -687,14 +693,14 @@ Status HDF5RecordingData::writeDataBlock(
// Set size to new size based on updated dimensionality
DataSpace fSpace = m_dataset->getSpace();
fSpace.getSimpleExtentDims(dSetDims.data());
for (int i = 0; i < nDimensions; ++i) {
for (SizeType i = 0; i < nDimensions; ++i) {
size[i] = dSetDims[i];
}

// Create memory space with the shape of the data
// DataSpace mSpace(dimension, dSetDim.data());
std::vector<hsize_t> dataDims(nDimensions);
for (int i = 0; i < nDimensions; ++i) {
for (SizeType i = 0; i < nDimensions; ++i) {
if (dataShape[i] == 0) {
dataDims[i] = 1;
} else {
Expand All @@ -711,7 +717,7 @@ Status HDF5RecordingData::writeDataBlock(
m_dataset->write(data, nativeType, mSpace, fSpace);

// Update position for simple extension
for (int i = 0; i < dataShape.size(); ++i) {
for (SizeType i = 0; i < dataShape.size(); ++i) {
position[i] += dataShape[i];
}
} catch (DataSetIException error) {
Expand Down
5 changes: 3 additions & 2 deletions src/nwb/RecordingContainers.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,8 @@ Status RecordingContainers::writeElectricalSeriesData(
if (es == nullptr)
return Status::Failure;

es->writeChannel(channel.getLocalIndex(), numSamples, data, timestamps);
return es->writeChannel(
channel.getLocalIndex(), numSamples, data, timestamps);
}

Status RecordingContainers::writeSpikeEventData(const SizeType& containerInd,
Expand All @@ -77,5 +78,5 @@ Status RecordingContainers::writeSpikeEventData(const SizeType& containerInd,
if (ses == nullptr)
return Status::Failure;

ses->writeSpike(numSamples, numChannels, data, timestamps);
return ses->writeSpike(numSamples, numChannels, data, timestamps);
}
12 changes: 6 additions & 6 deletions src/nwb/base/TimeSeries.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,25 +55,25 @@ void TimeSeries::initialize()

Status TimeSeries::writeData(const std::vector<SizeType>& dataShape,
const std::vector<SizeType>& positionOffset,
const void* data,
const void* timestamps)
const void* dataInput,
const void* timestampsInput)
{
Status tsStatus = Status::Success;
if (timestamps != nullptr) {
if (timestampsInput != nullptr) {
const std::vector<SizeType> timestampsShape = {
dataShape[0]}; // timestamps should match shape of the first data
// dimension
const std::vector<SizeType> timestampsPositionOffset = {positionOffset[0]};
tsStatus = this->timestamps->writeDataBlock(timestampsShape,
timestampsPositionOffset,
this->timestampsType,
timestamps);
timestampsInput);
}

Status dataStatus = this->data->writeDataBlock(
dataShape, positionOffset, this->dataType, data);
dataShape, positionOffset, this->dataType, dataInput);

if ((dataStatus != Status::Success) or (tsStatus != Status::Success)) {
if ((dataStatus != Status::Success) || (tsStatus != Status::Success)) {
return Status::Failure;
} else {
return Status::Success;
Expand Down
8 changes: 4 additions & 4 deletions src/nwb/base/TimeSeries.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,16 +51,16 @@ class TimeSeries : public Container
* @brief Writes a timeseries data block to the file.
* @param dataShape The size of the data block.
* @param positionOffset The position of the data block to write to.
* @param data A pointer to the data block.
* @param timestamps A pointer to the timestamps block. May be null if
* @param dataInput A pointer to the data block.
* @param timestampsInput A pointer to the timestamps block. May be null if
* multidimensional TimeSeries and only need to write the timestamps once but
* write data in separate blocks.
* @return The status of the write operation.
*/
Status writeData(const std::vector<SizeType>& dataShape,
const std::vector<SizeType>& positionOffset,
const void* data,
const void* timestamps = nullptr);
const void* dataInput,
const void* timestampsInput = nullptr);

/**
* @brief Initializes the TimeSeries by creating NWB related attributes and
Expand Down
10 changes: 5 additions & 5 deletions src/nwb/ecephys/ElectricalSeries.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ void ElectricalSeries::initialize()
TimeSeries::initialize();

// setup variables based on number of channels
std::vector<int> electrodeInds(channelVector.size());
std::vector<SizeType> electrodeInds(channelVector.size());
std::vector<float> channelConversions(channelVector.size());
for (size_t i = 0; i < channelVector.size(); ++i) {
electrodeInds[i] = channelVector[i].getGlobalIndex();
Expand Down Expand Up @@ -86,8 +86,8 @@ void ElectricalSeries::initialize()

Status ElectricalSeries::writeChannel(SizeType channelInd,
const SizeType& numSamples,
const void* data,
const void* timestamps)
const void* dataInput,
const void* timestampsInput)
{
// get offsets and datashape
std::vector<SizeType> dataShape = {
Expand All @@ -100,8 +100,8 @@ Status ElectricalSeries::writeChannel(SizeType channelInd,

// write channel data
if (channelInd == 0) {
return writeData(dataShape, positionOffset, data, timestamps);
return writeData(dataShape, positionOffset, dataInput, timestampsInput);
} else {
return writeData(dataShape, positionOffset, data);
return writeData(dataShape, positionOffset, dataInput);
}
}
8 changes: 4 additions & 4 deletions src/nwb/ecephys/ElectricalSeries.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,14 +60,14 @@ class ElectricalSeries : public TimeSeries
* @brief Writes a channel to an ElectricalSeries dataset.
* @param channelInd The channel index within the ElectricalSeries
* @param numSamples The number of samples to write (length in time).
* @param data A pointer to the data block.
* @param timestamps A pointer to the timestamps block.
* @param dataInput A pointer to the data block.
* @param timestampsInput A pointer to the timestamps block.
* @return The status of the write operation.
*/
Status writeChannel(SizeType channelInd,
const SizeType& numSamples,
const void* data,
const void* timestamps);
const void* dataInput,
const void* timestampsInput);

/**
* @brief Channel group that this time series is associated with.
Expand Down
Loading

0 comments on commit 264c893

Please sign in to comment.