Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 58 additions & 32 deletions databricks/koalas/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -3098,7 +3098,7 @@ def idxmax(self, skipna=True):
"""
Return the row label of the maximum value.

If multiple values equal the maximum, the row label with that
If multiple values equal the maximum, the first row label with that
value is returned.

Parameters
Expand Down Expand Up @@ -3158,34 +3158,47 @@ def idxmax(self, skipna=True):

>>> s.idxmax()
('b', 'f')

If multiple values equal the maximum, the first row label with that
value is returned.

>>> s = ks.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
Name: 0, dtype: int64

>>> s.idxmax()
3
"""
if len(self) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")

sdf = self._internal._sdf
scol = self._scol
index_scols = self._internal.index_scols
# desc_nulls_(last|first) is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_last()))
max_value = sdf.orderBy(scol.desc_nulls_last()).select([scol]).first()[0]
else:
sdf = sdf.orderBy(Column(scol._jc.desc_nulls_first()))
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
# This will only happen when skipna is False because we will
# place nulls first.
max_value = sdf.orderBy(scol.desc_nulls_first()).select([scol]).first()[0]

if max_value is None:
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)

sdf = sdf.filter(scol == max_value)
max_idx = tuple(sdf.select(index_scols).first())

return max_idx if len(max_idx) > 1 else max_idx[0]

def idxmin(self, skipna=True):
"""
Return the row label of the minimum value.

If multiple values equal the minimum, the row label with that
If multiple values equal the minimum, the first row label with that
value is returned.

Parameters
Expand Down Expand Up @@ -3250,28 +3263,41 @@ def idxmin(self, skipna=True):

>>> s.idxmin()
('b', 'f')

If multiple values equal the minimum, the first row label with that
value is returned.

>>> s = ks.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
>>> s
10 1
3 100
5 1
2 100
1 1
8 100
Name: 0, dtype: int64

>>> s.idxmin()
10
"""
if len(self) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")

sdf = self._internal._sdf
scol = self._scol
index_scols = self._internal.index_scols
# asc_nulls_(last|first) is used via Py4J directly because
# it's not supported in Spark 2.3.
if skipna:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_last()))
min_value = sdf.orderBy(scol.asc_nulls_last()).select([scol]).first()[0]
else:
sdf = sdf.orderBy(Column(scol._jc.asc_nulls_first()))
results = sdf.select([scol] + index_scols).take(1)
if len(results) == 0:
raise ValueError("attempt to get idxmin of an empty sequence")
if results[0][0] is None:
# This will only happen when skipna is False because we will
# place nulls first.
min_value = sdf.orderBy(scol.asc_nulls_first()).select([scol]).first()[0]

if min_value is None:
return np.nan
values = list(results[0][1:])
if len(values) == 1:
return values[0]
else:
return tuple(values)

sdf = sdf.filter(scol == min_value)
min_idx = tuple(sdf.select(index_scols).first())

return min_idx if len(min_idx) > 1 else min_idx[0]

def copy(self) -> 'Series':
"""
Expand Down
12 changes: 12 additions & 0 deletions databricks/koalas/tests/test_series.py
Original file line number Diff line number Diff line change
Expand Up @@ -660,6 +660,12 @@ def test_idxmax(self):
with self.assertRaisesRegex(ValueError, "an empty sequence"):
kser.idxmax()

pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
kser = ks.Series(pser)

self.assertEqual(kser.idxmax(), pser.idxmax())
self.assertEqual(repr(kser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False)))

def test_idxmin(self):
pser = pd.Series(data=[1, 4, 5], index=['A', 'B', 'C'])
kser = ks.Series(pser)
Expand All @@ -679,6 +685,12 @@ def test_idxmin(self):
with self.assertRaisesRegex(ValueError, "an empty sequence"):
kser.idxmin()

pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])
kser = ks.Series(pser)

self.assertEqual(kser.idxmin(), pser.idxmin())
self.assertEqual(repr(kser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False)))

def test_shift(self):
pser = pd.Series([10, 20, 15, 30, 45], name='x')
kser = ks.Series(pser)
Expand Down