|
| 1 | +/* |
| 2 | +Copyright (c) 2020, Blue Brain Project |
| 3 | +All rights reserved. |
| 4 | +
|
| 5 | +Redistribution and use in source and binary forms, with or without modification, |
| 6 | +are permitted provided that the following conditions are met: |
| 7 | +1. Redistributions of source code must retain the above copyright notice, |
| 8 | + this list of conditions and the following disclaimer. |
| 9 | +2. Redistributions in binary form must reproduce the above copyright notice, |
| 10 | + this list of conditions and the following disclaimer in the documentation |
| 11 | + and/or other materials provided with the distribution. |
| 12 | +3. Neither the name of the copyright holder nor the names of its contributors |
| 13 | + may be used to endorse or promote products derived from this software |
| 14 | + without specific prior written permission. |
| 15 | +
|
| 16 | +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| 17 | +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 18 | +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 19 | +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
| 20 | +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 21 | +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 22 | +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 23 | +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 24 | +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 25 | +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
| 26 | +THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | +*/ |
| 28 | + |
| 29 | +#include "coreneuron/coreneuron.hpp" |
| 30 | +#include "coreneuron/io/setup_fornetcon.hpp" |
| 31 | +#include "coreneuron/network/netcon.hpp" |
| 32 | +#include "coreneuron/nrniv/nrniv_decl.h" |
| 33 | +#include <map> |
| 34 | + |
| 35 | +namespace coreneuron { |
| 36 | + |
| 37 | +/** |
| 38 | + If FOR_NETCON in use, setup NrnThread fornetcon related info. |
| 39 | +
|
 i.e. NrnThread._fornetcon_perm_indices, NrnThread._fornetcon_weight_perm,
 and the relevant dparam element of each mechanism instance that uses
 a FOR_NETCONS statement.
| 43 | +
|
| 44 | + Makes use of nrn_fornetcon_cnt_, nrn_fornetcon_type_, |
| 45 | + and nrn_fornetcon_index_ that were specified during registration of |
| 46 | + mechanisms that use FOR_NETCONS. |
| 47 | +
|
 nrn_fornetcon_cnt_ is the number of mechanisms that use FOR_NETCONS,
 nrn_fornetcon_type_ is an int array of size nrn_fornetcon_cnt_, that specifies
 the mechanism type.
| 51 | + nrn_fornetcon_index_ is an int array of size nrn_fornetcon_cnt, that |
| 52 | + specifies the index into an instance's dparam int array having the |
| 53 | + fornetcon semantics. |
| 54 | +
|
 FOR_NETCONS (args) means to loop over all NetCon connecting to this
 target instance and args are the names of the items of each NetCon's
 weight vector (same as the enclosing NET_RECEIVE but possibly different
 local names).
| 59 | +
|
| 60 | + NrnThread._weights is a vector of weight groups where the number of groups |
| 61 | + is the number of NetCon in this thread and each group has a size |
| 62 | + equal to the number of args in the target NET_RECEIVE block. The order |
| 63 | + of these groups is the NetCon Object order in HOC (the construction order). |
| 64 | + So the weight vector indices for the NetCons in the FOR_NETCONS loop |
| 65 | + are not adjacent. |
| 66 | +
|
 NrnThread._fornetcon_weight_perm is an index vector into the
 NrnThread._weights vector such that the list of indices that targets a
 mechanism instance are adjacent.
| 70 | + NrnThread._fornetcon_perm_indices is an index vector into the |
| 71 | + NrnThread._fornetcon_weight_perm to the first of the list of NetCon weights |
| 72 | + that target the instance. The index of _fornetcon_perm_indices |
| 73 | + containing this first in the list is stored in the mechanism instances |
| 74 | + dparam at the dparam's semantic fornetcon slot. (Note that the next index |
| 75 | + points to the first index of the next target instance.) |
| 76 | +
|
| 77 | +**/ |
| 78 | + |
| 79 | +static int* fornetcon_slot(const int mtype, const int instance, |
| 80 | + const int fnslot, const NrnThread& nt) |
| 81 | +{ |
| 82 | + int layout = corenrn.get_mech_data_layout()[mtype]; |
| 83 | + int sz = corenrn.get_prop_dparam_size()[mtype]; |
| 84 | + Memb_list* ml = nt._ml_list[mtype]; |
| 85 | + int* fn = nullptr; |
| 86 | + if (layout == 1) { /* AoS */ |
| 87 | + fn = ml->pdata + (instance*sz + fnslot); |
| 88 | + }else if (layout == 0) { /* SoA */ |
| 89 | + int padded_cnt = nrn_soa_padded_size(ml->nodecount, layout); |
| 90 | + fn = ml->pdata + (fnslot*padded_cnt + instance); |
| 91 | + } |
| 92 | + return fn; |
| 93 | +} |
| 94 | + |
/** Build NrnThread._fornetcon_perm_indices and ._fornetcon_weight_perm.
 *
 *  Three passes, with the dparam fornetcon slot of each target instance
 *  reused as scratch storage along the way:
 *   1) zero every slot and count target instances (n_perm_indices),
 *   2) tally NetCons per target instance into the slot, then turn the
 *      tallies into a displacement (exclusive prefix-sum) vector and leave
 *      each instance's starting offset in its slot,
 *   3) scatter each NetCon's weight index into _fornetcon_weight_perm,
 *      bumping the slot as a running cursor,
 *  and finally rewrite every slot with the instance's index into
 *  _fornetcon_perm_indices, which is its permanent value.
 *  No-op when no mechanism in this thread uses FOR_NETCONS.
 *
 *  @param nt thread whose fornetcon info is (re)built in place
 */
void setup_fornetcon_info(NrnThread& nt) {

    if (nrn_fornetcon_cnt_ == 0) { return; }

    // Mechanism types in use that have FOR_NETCONS statements
    // Nice to have the dparam fornetcon slot as well so use map
    // instead of set
    std::map<int, int> type_to_slot;
    for (int i = 0; i < nrn_fornetcon_cnt_; ++i) {
        int type = nrn_fornetcon_type_[i];
        Memb_list* ml = nt._ml_list[type];
        // Only types actually instantiated in this thread matter.
        if (ml && ml->nodecount) {
            type_to_slot[type] = nrn_fornetcon_index_[i];
        }
    }
    if (type_to_slot.empty()) {
        return;
    }

    // How many NetCons (weight groups) are involved.
    // Also count how many weight groups for each target instance.
    // For the latter we can count in the dparam fornetcon slot.

    // zero the dparam fornetcon slot for counting and count number of slots.
    size_t n_perm_indices = 0;
    for (const auto& kv: type_to_slot) {
        int mtype = kv.first;
        int fnslot = kv.second;
        int nodecount = nt._ml_list[mtype]->nodecount;
        for (int i=0; i < nodecount; ++i) {
            int* fn = fornetcon_slot(mtype, i, fnslot, nt);
            *fn = 0;  // slot becomes a per-instance NetCon counter
            n_perm_indices += 1;
        }
    }

    // Count how many weight groups for each slot and total number of weight groups
    size_t n_weight_perm = 0;
    for (int i = 0; i < nt.n_netcon; ++i) {
        NetCon& nc = nt.netcons[i];
        int mtype = nc.target_->_type;
        auto search = type_to_slot.find(mtype);
        if (search != type_to_slot.end()) {
            int i_instance = nc.target_->_i_instance;
            int* fn = fornetcon_slot(mtype, i_instance, search->second, nt);
            *fn += 1;
            n_weight_perm += 1;
        }
    }

    // Displacement vector has an extra element since the number for last item
    // at n-1 is x[n] - x[n-1] and number for first is x[0] = 0.
    nt._fornetcon_perm_indices.resize(n_perm_indices + 1);
    nt._fornetcon_weight_perm.resize(n_weight_perm);

    // From dparam fornetcon slots, compute displacement vector, and
    // set the dparam fornetcon slot to the index of the displacement vector
    // to allow later filling the _fornetcon_weight_perm.
    size_t i_perm_indices = 0;
    nt._fornetcon_perm_indices[0] = 0;
    for (const auto& kv: type_to_slot) {
        int mtype = kv.first;
        int fnslot = kv.second;
        int nodecount = nt._ml_list[mtype]->nodecount;
        // NOTE: iteration order (map order, then instance order) must match
        // the counting pass above so each slot pairs with the same
        // i_perm_indices in every pass.
        for (int i=0; i < nodecount; ++i) {
            int* fn = fornetcon_slot(mtype, i, fnslot, nt);
            nt._fornetcon_perm_indices[i_perm_indices + 1] =
              nt._fornetcon_perm_indices[i_perm_indices] + size_t(*fn);
            // Slot now holds this instance's starting offset into
            // _fornetcon_weight_perm; used as a cursor in the next pass.
            *fn = int(nt._fornetcon_perm_indices[i_perm_indices]);
            i_perm_indices += 1;
        }
    }

    // One more iteration over NetCon to fill in weight index for
    // nt._fornetcon_weight_perm. To help with this we increment the
    // dparam fornetcon slot on each use.
    for (int i = 0; i < nt.n_netcon; ++i) {
        NetCon& nc = nt.netcons[i];
        int mtype = nc.target_->_type;
        auto search = type_to_slot.find(mtype);
        if (search != type_to_slot.end()) {
            int i_instance = nc.target_->_i_instance;
            int* fn = fornetcon_slot(mtype, i_instance, search->second, nt);
            size_t nc_w_index = size_t(nc.u.weight_index_);
            nt._fornetcon_weight_perm[size_t(*fn)] = nc_w_index;
            *fn += 1; // next item conceptually adjacent
        }
    }

    // Put back the proper values into the dparam fornetcon slot
    i_perm_indices = 0;
    for (const auto& kv: type_to_slot) {
        int mtype = kv.first;
        int fnslot = kv.second;
        int nodecount = nt._ml_list[mtype]->nodecount;
        for (int i=0; i < nodecount; ++i) {
            int* fn = fornetcon_slot(mtype, i, fnslot, nt);
            // Permanent value: index of this instance's entry in
            // _fornetcon_perm_indices (next entry marks the end of its list).
            *fn = int(i_perm_indices);
            i_perm_indices += 1;
        }
    }
}
| 197 | + |
| 198 | +} // namespace coreneuron |
0 commit comments