  // Realtime table this finder inspects for partition groups with no CONSUMING segment.
  private final String _realtimeTableName;
  // Fetches segment ZK metadata / completion times for the table.
  private final SegmentMetadataFetcher _segmentMetadataFetcher;
  // Maps each partition group's info key (String, renamed from the former Integer
  // partition-group id) to the largest offset currently available on the stream.
  // NOTE(review): populated from stream metadata at construction time — may be empty
  // if the fetch failed; callers appear to handle the empty case separately.
  private final Map<String, StreamPartitionMsgOffset> _partitionGroupInfoToLargestStreamOffsetMap;
  // Factory used to create/compare stream offsets for this table's stream type.
  private final StreamPartitionMsgOffsetFactory _streamPartitionMsgOffsetFactory;

  // Optional metrics sink; not final, presumably injected after construction — TODO confirm.
  private ControllerMetrics _controllerMetrics;
@@ -75,15 +75,16 @@ public MissingConsumingSegmentFinder(String realtimeTableName, ZkHelixPropertySt
7575 StreamConsumerFactoryProvider .create (streamConfigs .get (0 )).createStreamMsgOffsetFactory ();
7676
7777 // create partition group id to largest stream offset map
78- _partitionGroupIdToLargestStreamOffsetMap = new HashMap <>();
78+ _partitionGroupInfoToLargestStreamOffsetMap = new HashMap <>();
7979 streamConfigs .stream ().map (streamConfig -> {
8080 streamConfig .setOffsetCriteria (OffsetCriteria .LARGEST_OFFSET_CRITERIA );
8181 return streamConfig ;
8282 });
8383 try {
8484 PinotTableIdealStateBuilder .getPartitionGroupMetadataList (streamConfigs , Collections .emptyList (), false )
8585 .forEach (metadata -> {
86- _partitionGroupIdToLargestStreamOffsetMap .put (metadata .getPartitionGroupId (), metadata .getStartOffset ());
86+ _partitionGroupInfoToLargestStreamOffsetMap .put (metadata .getPartitionGroupInfo (),
87+ metadata .getStartOffset ());
8788 });
8889 } catch (Exception e ) {
8990 LOGGER .warn ("Problem encountered in fetching stream metadata for topics: {} of table: {}. "
@@ -95,11 +96,11 @@ public MissingConsumingSegmentFinder(String realtimeTableName, ZkHelixPropertySt
9596
9697 @ VisibleForTesting
9798 MissingConsumingSegmentFinder (String realtimeTableName , SegmentMetadataFetcher segmentMetadataFetcher ,
98- Map <Integer , StreamPartitionMsgOffset > partitionGroupIdToLargestStreamOffsetMap ,
99+ Map <String , StreamPartitionMsgOffset > partitionGroupInfoToLargestStreamOffsetMap ,
99100 StreamPartitionMsgOffsetFactory streamPartitionMsgOffsetFactory ) {
100101 _realtimeTableName = realtimeTableName ;
101102 _segmentMetadataFetcher = segmentMetadataFetcher ;
102- _partitionGroupIdToLargestStreamOffsetMap = partitionGroupIdToLargestStreamOffsetMap ;
103+ _partitionGroupInfoToLargestStreamOffsetMap = partitionGroupInfoToLargestStreamOffsetMap ;
103104 _streamPartitionMsgOffsetFactory = streamPartitionMsgOffsetFactory ;
104105 }
105106
@@ -118,24 +119,24 @@ public void findAndEmitMetrics(IdealState idealState) {
118119 @ VisibleForTesting
119120 MissingSegmentInfo findMissingSegments (Map <String , Map <String , String >> idealStateMap , Instant now ) {
120121 // create the maps
121- Map <Integer , LLCSegmentName > partitionGroupIdToLatestConsumingSegmentMap = new HashMap <>();
122- Map <Integer , LLCSegmentName > partitionGroupIdToLatestCompletedSegmentMap = new HashMap <>();
122+ Map <String , LLCSegmentName > partitionGroupInfoToLatestConsumingSegmentMap = new HashMap <>();
123+ Map <String , LLCSegmentName > partitionGroupInfoToLatestCompletedSegmentMap = new HashMap <>();
123124 idealStateMap .forEach ((segmentName , instanceToStatusMap ) -> {
124125 LLCSegmentName llcSegmentName = LLCSegmentName .of (segmentName );
125126 if (llcSegmentName != null ) { // Skip the uploaded realtime segments that don't conform to llc naming
126127 if (instanceToStatusMap .containsValue (SegmentStateModel .CONSUMING )) {
127- updateMap (partitionGroupIdToLatestConsumingSegmentMap , llcSegmentName );
128+ updateMap (partitionGroupInfoToLatestConsumingSegmentMap , llcSegmentName );
128129 } else if (instanceToStatusMap .containsValue (SegmentStateModel .ONLINE )) {
129- updateMap (partitionGroupIdToLatestCompletedSegmentMap , llcSegmentName );
130+ updateMap (partitionGroupInfoToLatestCompletedSegmentMap , llcSegmentName );
130131 }
131132 }
132133 });
133134
134135 MissingSegmentInfo missingSegmentInfo = new MissingSegmentInfo ();
135- if (!_partitionGroupIdToLargestStreamOffsetMap .isEmpty ()) {
136- _partitionGroupIdToLargestStreamOffsetMap .forEach ((partitionGroupId , largestStreamOffset ) -> {
137- if (!partitionGroupIdToLatestConsumingSegmentMap .containsKey (partitionGroupId )) {
138- LLCSegmentName latestCompletedSegment = partitionGroupIdToLatestCompletedSegmentMap .get (partitionGroupId );
136+ if (!_partitionGroupInfoToLargestStreamOffsetMap .isEmpty ()) {
137+ _partitionGroupInfoToLargestStreamOffsetMap .forEach ((partitionGroupInfo , largestStreamOffset ) -> {
138+ if (!partitionGroupInfoToLatestConsumingSegmentMap .containsKey (partitionGroupInfo )) {
139+ LLCSegmentName latestCompletedSegment = partitionGroupInfoToLatestCompletedSegmentMap .get (partitionGroupInfo );
139140 if (latestCompletedSegment == null ) {
140141 // There's no consuming or completed segment for this partition group. Possibilities:
141142 // 1) it's a new partition group that has not yet been detected
@@ -152,37 +153,36 @@ MissingSegmentInfo findMissingSegments(Map<String, Map<String, String>> idealSta
152153 if (completedSegmentEndOffset .compareTo (largestStreamOffset ) < 0 ) {
153154 // there are unconsumed messages available on the stream
154155 missingSegmentInfo ._totalCount ++;
155- updateMaxDurationInfo (missingSegmentInfo , partitionGroupId , segmentZKMetadata .getCreationTime (), now );
156+ updateMaxDurationInfo (missingSegmentInfo , partitionGroupInfo , segmentZKMetadata .getCreationTime (), now );
156157 }
157158 }
158159 }
159160 });
160161 } else {
161- partitionGroupIdToLatestCompletedSegmentMap .forEach ((partitionGroupId , latestCompletedSegment ) -> {
162- if (!partitionGroupIdToLatestConsumingSegmentMap .containsKey (partitionGroupId )) {
162+ partitionGroupInfoToLatestCompletedSegmentMap .forEach ((partitionGroupInfo , latestCompletedSegment ) -> {
163+ if (!partitionGroupInfoToLatestConsumingSegmentMap .containsKey (partitionGroupInfo )) {
163164 missingSegmentInfo ._totalCount ++;
164165 long segmentCompletionTimeMillis = _segmentMetadataFetcher
165166 .fetchSegmentCompletionTime (_realtimeTableName , latestCompletedSegment .getSegmentName ());
166- updateMaxDurationInfo (missingSegmentInfo , partitionGroupId , segmentCompletionTimeMillis , now );
167+ updateMaxDurationInfo (missingSegmentInfo , partitionGroupInfo , segmentCompletionTimeMillis , now );
167168 }
168169 });
169170 }
170171 return missingSegmentInfo ;
171172 }
172173
173- private void updateMaxDurationInfo (MissingSegmentInfo missingSegmentInfo , Integer partitionGroupId ,
174+ private void updateMaxDurationInfo (MissingSegmentInfo missingSegmentInfo , String partitionGroupInfo ,
174175 long segmentCompletionTimeMillis , Instant now ) {
175176 long duration = Duration .between (Instant .ofEpochMilli (segmentCompletionTimeMillis ), now ).toMinutes ();
176177 if (duration > missingSegmentInfo ._maxDurationInMinutes ) {
177178 missingSegmentInfo ._maxDurationInMinutes = duration ;
178179 }
179- LOGGER .warn ("PartitionGroupId {} hasn't had a consuming segment for {} minutes!" , partitionGroupId , duration );
180+ LOGGER .warn ("PartitionGroupId {} hasn't had a consuming segment for {} minutes!" , partitionGroupInfo , duration );
180181 }
181182
182- private void updateMap (Map <Integer , LLCSegmentName > partitionGroupIdToLatestSegmentMap ,
183+ private void updateMap (Map <String , LLCSegmentName > partitionGroupInfoToLatestSegmentMap ,
183184 LLCSegmentName llcSegmentName ) {
184- int partitionGroupId = llcSegmentName .getPartitionGroupId ();
185- partitionGroupIdToLatestSegmentMap .compute (partitionGroupId , (pid , existingSegment ) -> {
185+ partitionGroupInfoToLatestSegmentMap .compute (llcSegmentName .getPartitionGroupInfo (), (pid , existingSegment ) -> {
186186 if (existingSegment == null ) {
187187 return llcSegmentName ;
188188 } else {
0 commit comments