@brief Double-layer oscillatory network with phase oscillator for image segmentation.
@details Implementation based on paper @cite inproceedings::nnet::syncsegm::1.

@authors Andrei Novikov (pyclustering@yandex.ru)

@copyright BSD-3-Clause
import warnings

from math import floor

try:
    from PIL import Image
except Exception as error_instance:
    warnings.warn("Impossible to import PIL (please, install 'PIL'), pyclustering's visualization "
                  "functionality is partially not available (details: '%s')." % str(error_instance))
@brief Result visualizer of double-layer oscillatory network 'syncsegm'.
@brief Shows output dynamic of the first layer.

@param[in] analyser (syncsegm_analyser): Analyser of output dynamic of the 'syncsegm' oscillatory network.
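A minimal usage sketch (hedged: 'path_to_file' is a placeholder, it assumes this static method is exposed as syncsegm_visualizer.show_first_layer_dynamic, and the image has to be processed with collect_dynamic = True so that the dynamic is available):
@code
    algorithm = syncsegm(128, 4, 10);
    analyser = algorithm.process(path_to_file, collect_dynamic = True);

    syncsegm_visualizer.show_first_layer_dynamic(analyser);
@endcode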
sync_visualizer.show_output_dynamic(analyser.get_first_layer_analyser());
@brief Shows output dynamic of the second layer.

@param[in] analyser (syncsegm_analyser): Analyser of output dynamic of the 'syncsegm' oscillatory network.
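A minimal usage sketch (hedged: 'path_to_file' is a placeholder, it assumes this static method is exposed as syncsegm_visualizer.show_second_layer_dynamic, and the image has to be processed with collect_dynamic = True so that the dynamic is available):
@code
    algorithm = syncsegm(128, 4, 10);
    analyser = algorithm.process(path_to_file, collect_dynamic = True);

    syncsegm_visualizer.show_second_layer_dynamic(analyser);
@endcode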
second_layer_analysers = analyser.get_second_layer_analysers();
analysers_sequence = [ object_segment_analyser['analyser'] for object_segment_analyser in second_layer_analysers ]

sync_visualizer.show_output_dynamics(analysers_sequence);
@brief Performs analysis of output dynamic of the double-layer oscillatory network 'syncsegm' to extract information about segmentation results.
def __init__(self, color_analyser, object_segment_analysers = None):
@brief Constructor of the analyser.

@param[in] color_analyser (list): Analyser of coloring segmentation results of the first layer.
@param[in] object_segment_analysers (list): Analysers of objects on image segments - results of the second layer.
@brief Returns analyser of coloring segmentation of the first layer.

@brief Returns analysers of object segmentation of the second layer.
@brief Allocates color segments.

@param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
@param[in] noise_size (uint): Noise threshold - a segment whose size (in pixels) is less than the threshold is considered as noise.

@return (list) Color segments where each color segment consists of indexes of pixels that form the color segment.
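A minimal usage sketch (hedged: 'path_to_file' is a placeholder for a real image path):
@code
    algorithm = syncsegm(128, 4, 10);
    analyser = algorithm.process(path_to_file);

    # allocate color segments with phase tolerance 0.01, ignoring segments smaller than 10 pixels
    color_segments = analyser.allocate_colors(0.01, 10);
@endcode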
real_segments = [cluster for cluster in segments if len(cluster) > noise_size];
return real_segments;
@brief Allocates object segments.

@param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
@param[in] noise_size (uint): Noise threshold - a segment whose size (in pixels) is less than the threshold is considered as noise.

@return (list) Object segments where each object segment consists of indexes of pixels that form the object segment.
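A minimal usage sketch (hedged: 'path_to_file' is a placeholder; object segments are only available when 'object_radius' is not None):
@code
    algorithm = syncsegm(128, 4, 10);
    analyser = algorithm.process(path_to_file);

    # allocate object segments with phase tolerance 0.01, ignoring segments smaller than 10 pixels
    object_segments = analyser.allocate_objects(0.01, 10);
@endcode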
indexes = object_segment_analyser['color_segment'];
analyser = object_segment_analyser['analyser'];

segments += analyser.allocate_clusters(eps, indexes);
real_segments = [segment for segment in segments if len(segment) > noise_size];
return real_segments;
@brief Class represents the segmentation algorithm syncsegm.
@details syncsegm is a bio-inspired algorithm based on a double-layer oscillatory network that uses a modified Kuramoto model.
         The algorithm extracts colors and colored objects. It uses only CCORE (C++ implementation of pyclustering) parts to implement the algorithm.

         The CCORE option is True by default, so the sync network from the pyclustering core (the C/C++ shared library) is used for processing, which significantly increases performance.
# create an oscillatory network for image segmentation - extract colors (radius 128) and objects (radius 4),
# and ignore noise (segments whose size is less than 10 pixels)
algorithm = syncsegm(128, 4, 10);

# extract segments (colors and objects)
analyser = algorithm.process(path_to_file);

# obtain segmentation results (only colors - from the first layer)
color_segments = analyser.allocate_colors(0.01, 10);
draw_image_mask_segments(path_to_file, color_segments);

# obtain segmentation results (objects - from the second layer)
object_segments = analyser.allocate_objects(0.01, 10);
draw_image_mask_segments(path_to_file, object_segments);
def __init__(self, color_radius, object_radius, noise_size = 0, ccore = True):
@brief Constructor of the double-layer oscillatory network 'syncsegm' for image segmentation.

@param[in] color_radius (double): Radius of color connectivity (color similarity) for the first layer.
@param[in] object_radius (double): Radius of object connectivity (object similarity) for the second layer;
            if 'None' then object segmentation is not performed (only color segmentation).
@param[in] noise_size (double): Size of a segment that should be considered as noise and ignored by the second layer.
@param[in] ccore (bool): If 'True' then the C/C++ implementation is used to increase performance.
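A hedged construction sketch that shows both modes described above:
@code
    # colors and objects: color radius 128 for the first layer, object radius 4 for the second layer,
    # segments smaller than 10 pixels are treated as noise by the second layer
    full_segmenter = syncsegm(128, 4, 10);

    # colors only: object segmentation is skipped when object_radius is None
    color_only_segmenter = syncsegm(128, None, 10);
@endcode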
def process(self, image_source, collect_dynamic = False, order_color = 0.9995, order_object = 0.999):
@brief Performs image segmentation.

@param[in] image_source (string): Path to the image file that should be processed.
@param[in] collect_dynamic (bool): If 'True' then the whole dynamic of each layer of the network is collected.
@param[in] order_color (double): Local synchronization order for the first layer - coloring segmentation.
@param[in] order_object (double): Local synchronization order for the second layer - object segmentation.

@return (syncsegm_analyser) Analyser of segmentation results produced by the network.
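A minimal usage sketch (hedged: 'path_to_file' is a placeholder; the order values shown are simply the defaults from the signature above):
@code
    algorithm = syncsegm(128, 4, 10);

    # collect the dynamic of both layers so that syncsegm_visualizer can be used afterwards
    analyser = algorithm.process(path_to_file, collect_dynamic = True, order_color = 0.9995, order_object = 0.999);
@endcode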
data = read_image(image_source)
object_segment_analysers = self.__analyse_objects(image_source, color_analyser, collect_dynamic)
def __analyse_colors(self, image_data, collect_dynamic):
@brief Performs color segmentation by the first layer.

@param[in] image_data (array_like): Image sample as an array-like structure.
@param[in] collect_dynamic (bool): If 'True' then the whole dynamic of the first layer of the network is collected.

@return (syncnet_analyser) Analyser of color segmentation results of the first layer.
analyser = network.process(self.__order_color, solve_type.FAST, collect_dynamic);
def __analyse_objects(self, image_source, color_analyser, collect_dynamic):
@brief Performs object segmentation by the second layer.

@param[in] image_source (string): Path to the image file that should be processed.
@param[in] color_analyser (syncnet_analyser): Analyser of color segmentation results.
@param[in] collect_dynamic (bool): If 'True' then the whole dynamic of the second layer of the network is collected.

@return (map) Analysers of object segments.
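The returned structure is a list of dictionaries (a sketch of its shape only, based on how it is built in the method body):
@code
    [ { 'color_segment': [0, 1, 5, ...], 'analyser': <syncnet_analyser of that segment> }, ... ]
@endcode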
pointer_image = Image.open(image_source);
image_size = pointer_image.size;

object_analysers = [];

color_segments = color_analyser.allocate_clusters();
for segment in color_segments:
    object_analyser = self.__analyse_color_segment(image_size, segment, collect_dynamic);
    if (object_analyser is not None):
        object_analysers.append( { 'color_segment': segment, 'analyser': object_analyser } );
pointer_image.close();
return object_analysers;
def __analyse_color_segment(self, image_size, color_segment, collect_dynamic):
@brief Performs object segmentation of a separate color segment.

@param[in] image_size (list): Image size presented as [width x height].
@param[in] color_segment (list): Image segment that should be processed.
@param[in] collect_dynamic (bool): If 'True' then the whole dynamic of the second layer of the network is collected.

@return (syncnet_analyser) Analyser of object segmentation results of the second layer.
network = syncnet(coordinates, self.__object_radius, initial_phases = initial_type.EQUIPARTITION, ccore = True);
analyser = network.process(self.__order_object, solve_type.FAST, collect_dynamic);
def __extract_location_coordinates(self, image_size, color_segment):
@brief Extracts coordinates of the specified image segment.

@param[in] image_size (list): Image size presented as [width x height].
@param[in] color_segment (list): Image segment whose coordinates should be extracted.

@return (list) Coordinates of each pixel.
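A worked example of the index-to-coordinate conversion performed in the method body (hedged: a hypothetical image of width 4, so image_size[0] == 4):
@code
    # pixel index 5 in a 4-pixel-wide image
    y = floor(5 / 4)    # -> 1
    x = 5 - 1 * 4       # -> 1
    # the appended coordinate is [1, 1]
@endcode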
for index in color_segment:
    y = floor(index / image_size[0]);
    x = index - y * image_size[0];

    coordinates.append([x, y]);