You're right. The last dimension was not allocated using malloc, so I had to change the type to vector<vector<float *>>.
Thanks for your insight. I am serious about trying to do this the best way. I appreciate everyone's recommendations.
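For anyone following along, here is a minimal, self-contained sketch of the pattern, separate from the real function and with names of my own invention: the outer vector replaces the malloc'd float***, each inner vector holds one thread's pair of slice pointers, and the elements are set by plain indexing rather than by casting the vector's address to float**.

#include <cstdio>
#include <vector>

int main() {
    const int maxthreads = 2;
    const int chunk = 4; // hypothetical per-thread slice length
    std::vector<float> col0(maxthreads * chunk, 1.0f);
    std::vector<float> col1(maxthreads * chunk, 2.0f);
    float* probs[2] = { col0.data(), col1.data() };

    // one pair of non-owning slice pointers per thread; no malloc, no casts
    std::vector<std::vector<float*>> threadprobs(maxthreads);
    for (int t = 0; t < maxthreads; t++) {
        threadprobs[t].resize(2);
        threadprobs[t][0] = probs[0] + (size_t)t * chunk; // direct element assignment
        threadprobs[t][1] = probs[1] + (size_t)t * chunk;
        float** p = threadprobs[t].data(); // the raw float** a C-style reader expects
        printf("thread %d: first values %.1f %.1f\n", t, p[0][0], p[1][0]);
    }
    return 0; // the vectors clean up the pointer arrays; the floats live in col0/col1
}

The point is that .data() already yields the raw float**, so casting the inner vector's own address, as I first tried, reinterprets the vector object itself (not its elements) and is undefined behavior.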
// includes needed if this snippet is compiled on its own
#include <cstdlib>
#include <iostream>
#include <vector>
#include <zlib.h>
using std::cout; using std::endl; using std::vector;

int read_data_fly(char* datafile, int dtype, double* data, float** probs,
int num_samples_use, int* keepsamps, int start, int end,
int* keeppreds_use, gzFile inputgz, size_t current,
int num_samples, int num_preds, int genskip, int genheaders,
int genprobs, size_t* bgen_indexes, double missingvalue,
double threshold, double minprob, int nonsnp,
int maxthreads)
{
int thread;
int threadstart;
int threadend;
int threadlength;
//float*** threadprobs; // old C-style triple pointer, replaced below
vector<vector<float*>> threadprobs3_; // per-thread pairs of slice pointers into probs
// temporary overrides while testing the dtype == 2 path single-threaded
dtype = 2;
maxthreads = 1;
probs = (float**)malloc(sizeof(float*) * 2); // cast needed in C++
num_samples_use = 1000;
const size_t probs_size = num_samples_use;
probs[0] = (float*)malloc(sizeof(float) * probs_size);
probs[1] = (float*)malloc(sizeof(float) * probs_size);
float val = 0.0f;
for (size_t i = 0; i < probs_size; i++)
{
val += 1.0f;
probs[0][i] = val;
probs[1][i] = val;
}
if (dtype == 1 || dtype == 2 || dtype == 3 ||
dtype == 4) // can read in parallel
{
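// chunk length = ceil((end - start) / maxthreads), so the last chunk may be short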
threadlength = (end - start - 1) / maxthreads + 1;
if (dtype == 2)
{
threadprobs3_.resize(maxthreads);
//threadprobs = malloc(sizeof(float**) * maxthreads);
}
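// each thread reads its own contiguous chunk of predictors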
#pragma omp parallel for private(thread, threadstart, threadend) schedule(static, 1)
for (thread = 0; thread < maxthreads; thread++) {
threadstart = start + thread * threadlength;
threadend = start + (thread + 1) * threadlength;
if (threadend > end) {
threadend = end;
}
if (dtype == 1) {
read_bed_fly(
datafile, data + (size_t)(threadstart - start) * num_samples_use,
num_samples_use, keepsamps, threadend - threadstart,
keeppreds_use + threadstart, num_samples, num_preds, missingvalue);
}
if (dtype == 2) {
if (probs == NULL) {
read_bgen_fly(datafile,
data + (size_t)(threadstart - start) * num_samples_use,
NULL, num_samples_use, keepsamps, threadstart,
threadend, keeppreds_use, num_samples, num_preds,
bgen_indexes, missingvalue, threshold, minprob);
}
else {
//threadprobs[thread] = malloc(sizeof(float*) * 2);
threadprobs3_[thread].resize(2);
// point this thread's pair of slice pointers into the shared probs buffers;
// plain element assignment replaces the old cast through the vector's address,
// which overwrote the vector's internals and was undefined behavior
//threadprobs[thread][0] = probs[0] + (size_t)(threadstart - start) * num_samples_use;
//threadprobs[thread][1] = probs[1] + (size_t)(threadstart - start) * num_samples_use;
threadprobs3_[thread][0] = probs[0] + (size_t)(threadstart - start) * num_samples_use;
threadprobs3_[thread][1] = probs[1] + (size_t)(threadstart - start) * num_samples_use;
read_bgen_fly(
datafile, data + (size_t)(threadstart - start) * num_samples_use,
threadprobs3_[thread].data(), num_samples_use, keepsamps, threadstart,
threadend, keeppreds_use, num_samples, num_preds, bgen_indexes,
missingvalue, threshold, minprob);
//free(threadprobs[thread]);
threadprobs3_[thread].clear(); // drops only the pointers; probs still owns the data
}
}
if (dtype == 3) {
read_sped_fly(
datafile, data + (size_t)(threadstart - start) * num_samples_use,
num_samples_use, keepsamps, threadstart, threadend, keeppreds_use,
num_samples, num_preds, missingvalue, threshold, nonsnp);
}
if (dtype == 4) {
read_speed_fly(
datafile, data + (size_t)(threadstart - start) * num_samples_use,
num_samples_use, keepsamps, threadstart, threadend, keeppreds_use,
num_samples, num_preds, missingvalue, threshold, nonsnp);
}
}
if (dtype == 2) {
//free(threadprobs);
threadprobs3_.clear(); // again, nothing here owns memory, so no frees needed
}
}
if (dtype == 5) {
(void)read_gen_fly(datafile, data, probs, num_samples_use, keepsamps, start,
end, keeppreds_use, inputgz, current, num_samples,
num_preds, genskip, genheaders, genprobs, missingvalue,
threshold, minprob, nonsnp);
}
// note: the temporary test buffers above leak unless these are re-enabled
//free(probs[0]);
//free(probs[1]);
//free(probs);
return (current + end);
}
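One note on ownership, since it tripped me up: the pointers stored in threadprobs3_ only alias the probs buffers, so clearing the vectors releases the small pointer arrays and never touches the sample data. A tiny sketch of that point (again with made-up names):

#include <cstdlib>
#include <vector>

int main() {
    float* buf = (float*)malloc(sizeof(float) * 8); // owned here
    std::vector<float*> slices = { buf, buf + 4 };  // non-owning views into buf
    slices.clear(); // drops the pointers; buf itself is untouched
    free(buf);      // the single owner releases the data exactly once
    return 0;
}

That is why the clear() calls in the function are safe and why no free() is needed for threadprobs3_.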