CUV 0.9.201304091348

Wrappers of convolution operations by Alex Krizhevsky.
Enumerations
| enum | pool_type { PT_MAX, PT_AVG } |
| two "simple" ways to do pooling in a network | |
Functions
| template<class V , class M , class T > | |
| void | reorder_for_conv (tensor< V, M, T > &dst, const tensor< V, M, T > &src) |
| Reorder memory for application of Alex' convolution routines. | |
| template<class V , class M , class T > | |
| void | reorder_from_conv (tensor< V, M, T > &dst, const tensor< V, M, T > &src) |
Reverse operation of reorder_for_conv. | |
| template<class V , class M , class T > | |
| void | convolve2d (tensor< V, M, T > &dst, const tensor< V, M, T > &img, const tensor< V, M, T > &filter, int paddingStart=0, unsigned int moduleStride=0, unsigned int nGroups=0, float factNew=1.f, float factOld=0.f) |
| convolve a set of images with a set of filters | |
| template<class V , class M , class T > | |
| void | d_conv2d_dimg (tensor< V, M, T > &dst, const tensor< V, M, T > &delta, const tensor< V, M, T > &filter, int paddingStart=0, unsigned int moduleStride=0, unsigned int nGroups=0, float factNew=1.f, float factOld=0.f) |
| determine the gradient of a convolution w.r.t. the inputs | |
| template<class V , class M , class T > | |
| void | d_conv2d_dfilt (tensor< V, M, T > &dst, const tensor< V, M, T > &delta, const tensor< V, M, T > &input, int paddingStart=0, unsigned int moduleStride=0, unsigned int nGroups=0, unsigned int partialSum=1, float factNew=1.f, float factOld=0.f) |
| determine the gradient of a convolution w.r.t. the filters | |
| template<class V , class M , class T > | |
| void | local_pool (tensor< V, M, T > &dst, const tensor< V, M, T > &images, int subsX, int startX, int strideX, int outputsX, pool_type pooler) |
| local pooling (average/max) | |
| template<class V , class M , class T > | |
| void | local_max_pool_grad (tensor< V, M, T > &target, const tensor< V, M, T > &images, const tensor< V, M, T > &maxGrads, const tensor< V, M, T > &maxActs, int subsX, int startX, int strideX, float factNew=1.f, float factOld=0.f) |
| derivative of local max-pooling | |
| template<class V , class M , class T > | |
| void | local_avg_pool_grad (tensor< V, M, T > &target, const tensor< V, M, T > &avgGrads, int subsX, int startX, int strideX) |
| derivative of local avg-pooling | |
| template<class V , class M , class T > | |
| void | response_normalization (tensor< V, M, T > &target, tensor< V, M, T > &denoms, const tensor< V, M, T > &images, int patchSize, float addScale, float powScale) |
| response normalization. | |
| template<class V , class M , class T > | |
| void | response_normalization_grad (tensor< V, M, T > &input_gradients, tensor< V, M, T > &original_outputs, const tensor< V, M, T > &original_inputs, const tensor< V, M, T > &delta, const tensor< V, M, T > &denoms, int patchSize, float addScale, float powScale, float factNew=1.f, float factOld=0.f) |
derivative of response_normalization. | |
| template<class V , class M , class T > | |
| void | contrast_normalization (tensor< V, M, T > &target, tensor< V, M, T > &denoms, const tensor< V, M, T > &meanDiffs, const tensor< V, M, T > &images, int patchSize, float addScale, float powScale) |
| contrast normalization. | |
| template<class V , class M , class T > | |
| void | contrast_normalization_grad (tensor< V, M, T > &input_gradients, tensor< V, M, T > &original_outputs, const tensor< V, M, T > &meanDiffs, const tensor< V, M, T > &delta, const tensor< V, M, T > &denoms, int patchSize, float addScale, float powScale, float factNew=1.f, float factOld=0.f) |
derivative of contrast_normalization. | |
| template<class V , class M , class T > | |
| void | response_norm_cross_map (tensor< V, M, T > &target, tensor< V, M, T > &denoms, const tensor< V, M, T > &images, int sizeF, float addScale, float powScale, bool blocked) |
| response normalization across maps. | |
| template<class V , class M , class T > | |
| void | response_norm_cross_map_grad (tensor< V, M, T > &input_gradients, tensor< V, M, T > &original_outputs, const tensor< V, M, T > &original_inputs, const tensor< V, M, T > &delta, const tensor< V, M, T > &denoms, int sizeF, float addScale, float powScale, bool blocked, float factNew=1.f, float factOld=0.f) |
gradient of response_norm_cross_map | |
| template<class V , class M , class T > | |
| void | gaussian_blur (tensor< V, M, T > &target, const tensor< V, M, T > &images, const tensor< V, M, T > &filter, bool horiz, float factNew=1.f, float factOld=0.f) |
| gaussian blur (keeps size constant!). | |
| template<class V , class M , class T > | |
| void | bed_of_nails (tensor< V, M, T > &target, const tensor< V, M, T > &images, int startX, int strideX, float factNew=1.f, float factOld=0.f) |
| Bed of nails subsampling (take every n-th value in each direction). | |
| template<class V , class M , class T > | |
| void | bed_of_nails_grad (tensor< V, M, T > &target, const tensor< V, M, T > &delta, int startX, int strideX, float factNew=1.f, float factOld=0.f) |
Gradient of bed_of_nails. | |
| template<class V , class M , class T > | |
| void | crop (tensor< V, M, T > &cropped, const tensor< V, M, T > &images, int startY, int startX) |
| cropping | |
| template<class V , class M , class T > | |
| void | resize_bilinear (tensor< V, M, T > &dest, const tensor< V, M, T > &images, float scale) |
| bilinear resizing | |
| template<class V , class M , class T > | |
| void | pairwise_norm (tensor< V, M, T > &dst, const tensor< V, M, T > &src, unsigned int dim) |
| square the input, then add every map pair and take the square root. | |
| template<class V , class M , class T > | |
| void | pairwise_norm_grad (tensor< V, M, T > &dst, const tensor< V, M, T > &X, const tensor< V, M, T > &D, unsigned int dim) |
| calculates the gradient of pairwise_norm. | |
Wrappers of convolution operations by Alex Krizhevsky.

enum cuv::alex_conv::pool_type { PT_MAX, PT_AVG }

Two "simple" ways to do pooling in a network.
Definition at line 112 of file convolution_ops.hpp.
template<class V, class M, class T>
void cuv::alex_conv::bed_of_nails(tensor<V, M, T>& target,
                                  const tensor<V, M, T>& images,
                                  int startX,
                                  int strideX,
                                  float factNew = 1.f,
                                  float factOld = 0.f)
Bed of nails subsampling (take every n-th value in each direction).
| target | OUT Where result is written to (smaller) |
| images | IN inputs (nChannels x nImgPixY x nImgPixX x nImg) |
| startX | IN where to start sampling |
| strideX | IN distance between picked values |
| factNew | IN multiplier for newly calculated values |
| factOld | IN multiplier for data already in target |
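
As a usage illustration, here is a minimal sketch of a bed-of-nails subsampling call. The header paths, the tensor construction via cuv::extents, and the assumption that the output width is nImgPixX/strideX for startX = 0 are not stated on this page and may need adjusting:

    #include <cuv/basics/tensor.hpp>
    #include <cuv/convolution_ops/convolution_ops.hpp> // header path is an assumption

    using namespace cuv;

    int main() {
        // images: (nChannels x nImgPixY x nImgPixX x nImg), as documented above
        unsigned int nChan = 3, nPix = 16, nImg = 4;
        tensor<float, dev_memory_space> images(extents[nChan][nPix][nPix][nImg]);
        // target is smaller; assuming nPix/strideX outputs per direction for startX = 0
        tensor<float, dev_memory_space> target(extents[nChan][nPix / 2][nPix / 2][nImg]);

        // ... fill `images` with data ...

        // take every 2nd value in each direction, starting at offset 0;
        // factNew = 1, factOld = 0 overwrites whatever is in `target`
        alex_conv::bed_of_nails(target, images, /*startX=*/0, /*strideX=*/2, 1.f, 0.f);
        return 0;
    }
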
template<class V, class M, class T>
void cuv::alex_conv::bed_of_nails_grad(tensor<V, M, T>& target,
                                       const tensor<V, M, T>& delta,
                                       int startX,
                                       int strideX,
                                       float factNew = 1.f,
                                       float factOld = 0.f)
Gradient of bed_of_nails.
| target | OUT Where result is written to (larger) |
| delta | IN outer derivative of current function |
| startX | IN where to start sampling |
| strideX | IN distance between picked values |
| factNew | IN multiplier for newly calculated values |
| factOld | IN multiplier for data already in target |
template<class V, class M, class T>
void cuv::alex_conv::contrast_normalization(tensor<V, M, T>& target,
                                            tensor<V, M, T>& denoms,
                                            const tensor<V, M, T>& meanDiffs,
                                            const tensor<V, M, T>& images,
                                            int patchSize,
                                            float addScale,
                                            float powScale)
contrast normalization.

For every position x, accumulates the squared differences to the local mean (meanDiffs) over a square patch of width patchSize around x and uses this sum (scaled by addScale, exponentiated by powScale) as the normalization denominator for x.

| target | OUT result is written here |
| denoms | OUT needed for gradient calculation, same shape as inputs |
| meanDiffs | IN difference to mean |
| images | IN inputs |
| patchSize | IN width of (square) patches to operate on |
| addScale | IN scale of the accumulated patch statistics |
| powScale | IN exponent of the normalization denominator |
template<class V, class M, class T>
void cuv::alex_conv::contrast_normalization_grad(tensor<V, M, T>& input_gradients,
                                                 tensor<V, M, T>& original_outputs,
                                                 const tensor<V, M, T>& meanDiffs,
                                                 const tensor<V, M, T>& delta,
                                                 const tensor<V, M, T>& denoms,
                                                 int patchSize,
                                                 float addScale,
                                                 float powScale,
                                                 float factNew = 1.f,
                                                 float factOld = 0.f)
derivative of contrast_normalization.

| input_gradients | OUT the gradient w.r.t. x |
| original_outputs | INOUT the outputs of contrast_normalization (will be overwritten during calculation) |
| meanDiffs | IN difference to mean (as passed to contrast_normalization) |
| denoms | IN the intermediate result returned by contrast_normalization |
| delta | IN outer derivative of the current function (= backpropagated gradient) |
| addScale | IN same value as used in the forward pass |
| powScale | IN same value as used in the forward pass |
template<class V, class M, class T>
void cuv::alex_conv::convolve2d(tensor<V, M, T>& dst,
                                const tensor<V, M, T>& img,
                                const tensor<V, M, T>& filter,
                                int paddingStart = 0,
                                unsigned int moduleStride = 0,
                                unsigned int nGroups = 0,
                                float factNew = 1.f,
                                float factOld = 0.f)
convolve a set of images with a set of filters
| dst | (nFilt, nModulesY, nModulesX, nImg) |
| img | (nImgChan, nImgPixY, nImgPixX, nImg) |
| filter | (nFiltChan, nFiltPix, nFilt) |
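
A minimal forward-pass sketch with the shapes listed above; it reuses the includes and setup from the bed_of_nails sketch. The relation nModules = nImgPix - filterSize + 1 (no padding, unit stride) is an assumption about how this wrapper computes the output size:

    unsigned int nImgChan = 3, nImgPix = 32, nImg = 8;
    unsigned int nFilt = 16, filtSize = 5;
    unsigned int nModules = nImgPix - filtSize + 1;   // 28, assuming no padding, stride 1

    tensor<float, dev_memory_space> img   (extents[nImgChan][nImgPix][nImgPix][nImg]);
    tensor<float, dev_memory_space> filter(extents[nImgChan][filtSize * filtSize][nFilt]);
    tensor<float, dev_memory_space> dst   (extents[nFilt][nModules][nModules][nImg]);

    // ... fill img and filter ...

    // default paddingStart/moduleStride/nGroups; factNew = 1, factOld = 0 overwrite dst
    alex_conv::convolve2d(dst, img, filter);
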
template<class V, class M, class T>
void cuv::alex_conv::d_conv2d_dfilt(tensor<V, M, T>& dst,
                                    const tensor<V, M, T>& delta,
                                    const tensor<V, M, T>& input,
                                    int paddingStart = 0,
                                    unsigned int moduleStride = 0,
                                    unsigned int nGroups = 0,
                                    unsigned int partialSum = 1,
                                    float factNew = 1.f,
                                    float factOld = 0.f)
determine the gradient of a convolution w.r.t. the filters.

| dst | (nModules/partialSum, nFilterColors, filterPixels, nFilters) |
| input | (nImgColors, nImgPixY, nImgPixX, nImages), with stride given |
| delta | (nFilters, nModulesY, nModulesX, nImages) |
template<class V, class M, class T>
void cuv::alex_conv::d_conv2d_dimg(tensor<V, M, T>& dst,
                                   const tensor<V, M, T>& delta,
                                   const tensor<V, M, T>& filter,
                                   int paddingStart = 0,
                                   unsigned int moduleStride = 0,
                                   unsigned int nGroups = 0,
                                   float factNew = 1.f,
                                   float factOld = 0.f)
determine the gradient of a convolution w.r.t. the inputs.

| dst | (nImageColors, nImgPixY, nImgPixX, nImages) |
| delta | (nFilt, nModulesY, nModulesX, nImg) |
| filter | (nFilterColors, filterPixels, nFilters) |
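
Continuing the convolve2d sketch above, the two gradient calls could look roughly as follows. The dst shapes follow the parameter lists; that with partialSum = 1 the filter-gradient buffer holds one slice per module (nModules meaning nModulesY * nModulesX) is an assumption about how the partial sums are laid out:

    // gradient w.r.t. the input images: same shape as img
    tensor<float, dev_memory_space> delta(extents[nFilt][nModules][nModules][nImg]);
    tensor<float, dev_memory_space> dimg (extents[nImgChan][nImgPix][nImgPix][nImg]);
    alex_conv::d_conv2d_dimg(dimg, delta, filter);

    // gradient w.r.t. the filters; with partialSum = 1 there is one partial
    // gradient per module, to be summed afterwards
    tensor<float, dev_memory_space> dfilt(
        extents[nModules * nModules][nImgChan][filtSize * filtSize][nFilt]);
    alex_conv::d_conv2d_dfilt(dfilt, delta, img);
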
template<class V, class M, class T>
void cuv::alex_conv::gaussian_blur(tensor<V, M, T>& target,
                                   const tensor<V, M, T>& images,
                                   const tensor<V, M, T>& filter,
                                   bool horiz,
                                   float factNew = 1.f,
                                   float factOld = 0.f)
Gaussian blur (the output keeps the size of the input).

| target | OUT where blurred data is written to |
| images | IN (unblurred) inputs |
| filter | IN 1-D filter to convolve with (length 2k+1) |
| horiz | IN whether this is the horizontal or vertical filter pass |
| factNew | IN multiplier for newly calculated values |
| factOld | IN multiplier for data already in target |
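
Since the horiz flag selects one pass of a separable filter, a full blur presumably takes two calls through an intermediate buffer. The 1-D filter layout and the use of a temporary are assumptions in this sketch (same includes/setup as above):

    unsigned int nChan = 3, nPix = 32, nImg = 4, k = 2;
    tensor<float, dev_memory_space> images(extents[nChan][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> tmp   (extents[nChan][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> target(extents[nChan][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> filter(extents[2 * k + 1]);   // 1-D kernel of length 2k+1

    // ... fill images and filter (e.g. with normalized Gaussian weights) ...

    alex_conv::gaussian_blur(tmp,    images, filter, /*horiz=*/true);   // horizontal pass
    alex_conv::gaussian_blur(target, tmp,    filter, /*horiz=*/false);  // vertical pass
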
template<class V, class M, class T>
void cuv::alex_conv::local_pool(tensor<V, M, T>& dst,
                                const tensor<V, M, T>& images,
                                int subsX,
                                int startX,
                                int strideX,
                                int outputsX,
                                pool_type pooler)
local pooling (average/max)
| images | (numFilters, nImgPixY, nImgPixX, numImages) |
| dst | (numFilters, nImgPixY/n, nImgPixX/n, numImages) |
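
A sketch of 2x2 max-pooling with stride 2; that subsX is the pooling-window width and outputsX the number of outputs per direction follows the usual cuda-convnet convention and is an assumption here:

    unsigned int nFilt = 16, nPix = 32, nImg = 8;
    int subsX = 2, startX = 0, strideX = 2;
    int outputsX = nPix / strideX;                     // 16, assuming non-overlapping windows

    tensor<float, dev_memory_space> images(extents[nFilt][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> dst   (extents[nFilt][outputsX][outputsX][nImg]);

    alex_conv::local_pool(dst, images, subsX, startX, strideX, outputsX, alex_conv::PT_MAX);
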
template<class V, class M, class T>
void cuv::alex_conv::pairwise_norm(tensor<V, M, T>& dst,
                                   const tensor<V, M, T>& src,
                                   unsigned int dim)
square the input, then add every map pair and take the square root.
In NumPy notation, this implements:
dst = sqrt((src ** 2)[::2] + (src ** 2)[1::2])
I.e., if pairs of src represent the result of convolution with orthogonal filters, then this calculates the norm.
template<class V, class M, class T>
void cuv::alex_conv::pairwise_norm_grad(tensor<V, M, T>& dst,
                                        const tensor<V, M, T>& X,
                                        const tensor<V, M, T>& D,
                                        unsigned int dim)
calculates the gradient of pairwise_norm.
In NumPy notation, this implements:
dst[::2] = 2.0 / PN * X[::2]
dst[1::2] = 2.0 / PN * X[1::2]
where
PN = pairwise_norm(X)
| dst | where to write result |
| X | the original input to the pairwise norm |
| D | the backpropagated delta |
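
A sketch of the forward and backward calls; that dim addresses the map dimension (here dimension 0) and that the output has half as many maps as the input are assumptions derived from the description above:

    unsigned int nMaps = 8, nPix = 16, nImg = 4;
    tensor<float, dev_memory_space> src (extents[nMaps][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> dst (extents[nMaps / 2][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> D   (extents[nMaps / 2][nPix][nPix][nImg]); // backpropagated delta
    tensor<float, dev_memory_space> grad(extents[nMaps][nPix][nPix][nImg]);

    alex_conv::pairwise_norm(dst, src, /*dim=*/0);        // dst = sqrt(src[::2]^2 + src[1::2]^2)
    alex_conv::pairwise_norm_grad(grad, src, D, /*dim=*/0);
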
template<class V, class M, class T>
void cuv::alex_conv::reorder_for_conv(tensor<V, M, T>& dst,
                                      const tensor<V, M, T>& src)
Reorder memory for application of Alex's convolution routines.

The routines by Alex require images to be in a slightly unintuitive memory order: (nChannels, nPixH, nPixW, nImages). This is a convenience function that changes images of the form (nImages, nChannels, nPixH, nPixW) into the required format at the cost of one transpose operation.
template<class V, class M, class T>
void cuv::alex_conv::reorder_from_conv(tensor<V, M, T>& dst,
                                       const tensor<V, M, T>& src)
Reverse operation of reorder_for_conv.
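
A sketch of moving a conventionally laid-out batch into the layout required by the wrapped routines and back; only the shapes listed in the description above are used (same includes/setup as the earlier sketches):

    unsigned int nImg = 8, nChan = 3, nPix = 32;
    tensor<float, dev_memory_space> batch    (extents[nImg][nChan][nPix][nPix]);
    tensor<float, dev_memory_space> reordered(extents[nChan][nPix][nPix][nImg]);

    // ... fill `batch` in (nImages, nChannels, nPixH, nPixW) order ...

    alex_conv::reorder_for_conv(reordered, batch);    // now usable by convolve2d etc.
    // ... run convolutions / pooling on `reordered` ...
    alex_conv::reorder_from_conv(batch, reordered);   // back to (nImages, nChannels, nPixH, nPixW)
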
template<class V, class M, class T>
void cuv::alex_conv::response_norm_cross_map(tensor<V, M, T>& target,
                                             tensor<V, M, T>& denoms,
                                             const tensor<V, M, T>& images,
                                             int sizeF,
                                             float addScale,
                                             float powScale,
                                             bool blocked)
response normalization across maps.
| target | OUT normalized outputs are written here. |
| denoms | OUT intermediate output used for gradient calculation |
| images | IN the images which are to be normalized (4D: nChannels x nImgPixY x nImgPixX x nImg) |
| sizeF | IN the number of filters to normalize over |
template<class V, class M, class T>
void cuv::alex_conv::response_normalization(tensor<V, M, T>& target,
                                            tensor<V, M, T>& denoms,
                                            const tensor<V, M, T>& images,
                                            int patchSize,
                                            float addScale,
                                            float powScale)
response normalization.

For every position x, accumulates the squared inputs over a square patch of width patchSize around x and uses this sum (scaled by addScale, exponentiated by powScale) as the normalization denominator for x.

| target | OUT result is written here |
| denoms | OUT needed for gradient calculation, same shape as inputs |
| images | IN inputs |
| patchSize | IN width of (square) patches to operate on |
| addScale | IN scale of the accumulated patch statistics |
| powScale | IN exponent of the normalization denominator |
template<class V, class M, class T>
void cuv::alex_conv::response_normalization_grad(tensor<V, M, T>& input_gradients,
                                                 tensor<V, M, T>& original_outputs,
                                                 const tensor<V, M, T>& original_inputs,
                                                 const tensor<V, M, T>& delta,
                                                 const tensor<V, M, T>& denoms,
                                                 int patchSize,
                                                 float addScale,
                                                 float powScale,
                                                 float factNew = 1.f,
                                                 float factOld = 0.f)
derivative of response_normalization.

| input_gradients | OUT the gradient w.r.t. x |
| original_outputs | INOUT the outputs of response_normalization (will be overwritten during calculation) |
| original_inputs | IN the original inputs to response_normalization |
| denoms | IN the intermediate result returned by response_normalization |
| delta | IN outer derivative of the current function (= backpropagated gradient) |
| addScale | IN same value as used in the forward pass |
| powScale | IN same value as used in the forward pass |
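
A sketch of the forward call followed by its gradient. The addScale/powScale values are arbitrary placeholders, all buffers have the shape of the input (as stated for denoms above), and target is reused as original_outputs, which the gradient call overwrites:

    unsigned int nChan = 16, nPix = 24, nImg = 4;
    int   patchSize = 5;
    float addScale  = 1e-4f, powScale = 0.75f;        // placeholder hyperparameters

    tensor<float, dev_memory_space> images(extents[nChan][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> target(extents[nChan][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> denoms(extents[nChan][nPix][nPix][nImg]);

    alex_conv::response_normalization(target, denoms, images, patchSize, addScale, powScale);

    // backward: delta is the gradient arriving from the layer above
    tensor<float, dev_memory_space> delta          (extents[nChan][nPix][nPix][nImg]);
    tensor<float, dev_memory_space> input_gradients(extents[nChan][nPix][nPix][nImg]);

    // note: `target` (passed as original_outputs) is overwritten during this call
    alex_conv::response_normalization_grad(input_gradients, target, images, delta, denoms,
                                           patchSize, addScale, powScale);
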