@@ -327,7 +327,6 @@ class KafkaConsumer(six.Iterator):
         'sasl_kerberos_domain_name': None,
         'sasl_oauth_token_provider': None,
         'socks5_proxy': None,
-        'legacy_iterator': False, # enable to revert to < 1.4.7 iterator
         'kafka_client': KafkaClient,
     }
     DEFAULT_SESSION_TIMEOUT_MS_0_9 = 30000
@@ -845,8 +844,7 @@ def seek(self, partition, offset):
         assert partition in self._subscription.assigned_partitions(), 'Unassigned partition'
         log.debug("Seeking to offset %s for partition %s", offset, partition)
         self._subscription.assignment[partition].seek(offset)
-        if not self.config['legacy_iterator']:
-            self._iterator = None
+        self._iterator = None

     def seek_to_beginning(self, *partitions):
         """Seek to the oldest available offset for partitions.
@@ -871,8 +869,7 @@ def seek_to_beginning(self, *partitions):
         for tp in partitions:
             log.debug("Seeking to beginning of partition %s", tp)
             self._subscription.need_offset_reset(tp, OffsetResetStrategy.EARLIEST)
-        if not self.config['legacy_iterator']:
-            self._iterator = None
+        self._iterator = None

     def seek_to_end(self, *partitions):
         """Seek to the most recent available offset for partitions.
@@ -897,8 +894,7 @@ def seek_to_end(self, *partitions):
         for tp in partitions:
             log.debug("Seeking to end of partition %s", tp)
             self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)
-        if not self.config['legacy_iterator']:
-            self._iterator = None
+        self._iterator = None

     def subscribe(self, topics=(), pattern=None, listener=None):
         """Subscribe to a list of topics, or a topic regex pattern.
@@ -974,8 +970,7 @@ def unsubscribe(self):
         self._client.cluster.need_all_topic_metadata = False
         self._client.set_topics([])
         log.debug("Unsubscribed all topics or patterns and assigned partitions")
-        if not self.config['legacy_iterator']:
-            self._iterator = None
+        self._iterator = None

     def metrics(self, raw=False):
         """Get metrics on consumer performance.
@@ -1157,73 +1152,12 @@ def _message_generator_v2(self):
                 self._subscription.assignment[tp].position = OffsetAndMetadata(record.offset + 1, '', -1)
                 yield record

-    def _message_generator(self):
-        assert self.assignment() or self.subscription() is not None, 'No topic subscription or manual partition assignment'
-
-        def inner_poll_ms():
-            return max(0, min((1000 * (self._consumer_timeout - time.time())), self.config['retry_backoff_ms']))
-
-        while time.time() < self._consumer_timeout:
-
-            if not self._coordinator.poll(timeout_ms=inner_poll_ms()):
-                continue
-
-            # Fetch offsets for any subscribed partitions that we arent tracking yet
-            if not self._subscription.has_all_fetch_positions():
-                partitions = self._subscription.missing_fetch_positions()
-                self._update_fetch_positions(partitions)
-
-            self._client.poll(timeout_ms=inner_poll_ms())
-
-            # after the long poll, we should check whether the group needs to rebalance
-            # prior to returning data so that the group can stabilize faster
-            if self._coordinator.need_rejoin():
-                continue
-
-            # We need to make sure we at least keep up with scheduled tasks,
-            # like heartbeats, auto-commits, and metadata refreshes
-            timeout_at = self._next_timeout()
-
-            # Short-circuit the fetch iterator if we are already timed out
-            # to avoid any unintentional interaction with fetcher setup
-            if time.time() > timeout_at:
-                continue
-
-            for msg in self._fetcher:
-                yield msg
-                if time.time() > timeout_at:
-                    log.debug("internal iterator timeout - breaking for poll")
-                    break
-                self._client.poll(timeout_ms=0)
-
-            # An else block on a for loop only executes if there was no break
-            # so this should only be called on a StopIteration from the fetcher
-            # We assume that it is safe to init_fetches when fetcher is done
-            # i.e., there are no more records stored internally
-            else:
-                self._fetcher.send_fetches()
-
-    def _next_timeout(self):
-        timeout = min(self._consumer_timeout,
-                      self._client.cluster.ttl() / 1000.0 + time.time(),
-                      self._coordinator.time_to_next_poll() + time.time())
-        return timeout
-
     def __iter__(self):  # pylint: disable=non-iterator-returned
         return self

     def __next__(self):
         if self._closed:
             raise StopIteration('KafkaConsumer closed')
-        # Now that the heartbeat thread runs in the background
-        # there should be no reason to maintain a separate iterator
-        # but we'll keep it available for a few releases just in case
-        if self.config['legacy_iterator']:
-            return self.next_v1()
-        else:
-            return self.next_v2()
-
-    def next_v2(self):
         self._set_consumer_timeout()
         while time.time() < self._consumer_timeout:
             if not self._iterator:
@@ -1234,17 +1168,6 @@ def next_v2(self):
                 self._iterator = None
                 raise StopIteration()

-    def next_v1(self):
-        if not self._iterator:
-            self._iterator = self._message_generator()
-
-        self._set_consumer_timeout()
-        try:
-            return next(self._iterator)
-        except StopIteration:
-            self._iterator = None
-            raise
-
     def _set_consumer_timeout(self):
         # consumer_timeout_ms can be used to stop iteration early
         if self.config['consumer_timeout_ms'] >= 0:
0 commit comments