
Remove unused functions, cimports #19360

Merged: 6 commits, Jan 24, 2018
20 changes: 2 additions & 18 deletions pandas/_libs/index.pyx
@@ -73,10 +73,6 @@ cpdef object get_value_box(ndarray arr, object loc):
return util.get_value_1d(arr, i)


-def set_value_at(ndarray arr, object loc, object val):
-return util.set_value_at(arr, loc, val)


# Don't populate hash tables in monotonic indexes larger than this
_SIZE_CUTOFF = 1000000

@@ -404,18 +400,6 @@ cdef Py_ssize_t _bin_search(ndarray values, object val) except -1:
else:
return mid + 1

-_pad_functions = {
-'object': algos.pad_object,
-'int64': algos.pad_int64,
-'float64': algos.pad_float64
-}
-
-_backfill_functions = {
-'object': algos.backfill_object,
-'int64': algos.backfill_int64,
-'float64': algos.backfill_float64
-}


cdef class DatetimeEngine(Int64Engine):

@@ -566,7 +550,7 @@ cpdef convert_scalar(ndarray arr, object value):
# we don't turn bools into int/float/complex

if arr.descr.type_num == NPY_DATETIME:
-if isinstance(value, np.ndarray):
+if util.is_array(value):
pass
elif isinstance(value, (datetime, np.datetime64, date)):
return Timestamp(value).value
@@ -577,7 +561,7 @@ cpdef convert_scalar(ndarray arr, object value):
raise ValueError("cannot set a Timestamp with a non-timestamp")

elif arr.descr.type_num == NPY_TIMEDELTA:
-if isinstance(value, np.ndarray):
+if util.is_array(value):
pass
elif isinstance(value, timedelta):
return Timedelta(value).value
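Note (not part of the diff): util.is_array, used throughout this PR in place of isinstance(value, np.ndarray), is assumed to be the inline helper in pandas/_libs/src/util.pxd that wraps numpy's C-API type check. A minimal sketch of that assumption:

cimport numpy as cnp
cnp.import_array()  # initialize numpy's C API before any PyArray_* call

cdef inline bint is_array(object obj):
    # C-level equivalent of isinstance(obj, np.ndarray): a direct
    # PyArray_Check type test, with no runtime lookup of np.ndarray
    return cnp.PyArray_Check(obj)

The payoff is avoiding a module attribute lookup and generic isinstance dispatch on hot paths.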
7 changes: 4 additions & 3 deletions pandas/_libs/internals.pyx
@@ -4,6 +4,7 @@ cimport cython
from cython cimport Py_ssize_t

from cpython cimport PyObject
+from cpython.slice cimport PySlice_Check

cdef extern from "Python.h":
Py_ssize_t PY_SSIZE_T_MAX
@@ -32,7 +33,7 @@ cdef class BlockPlacement:
self._has_slice = False
self._has_array = False

-if isinstance(val, slice):
+if PySlice_Check(val):
slc = slice_canonize(val)

if slc.start != slc.stop:
@@ -118,7 +119,7 @@ cdef class BlockPlacement:
else:
val = self._as_array[loc]

-if not isinstance(val, slice) and val.ndim == 0:
+if not PySlice_Check(val) and val.ndim == 0:
return val

return BlockPlacement(val)
@@ -288,7 +289,7 @@ def slice_getitem(slice slc not None, ind):

s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc)

-if isinstance(ind, slice):
+if PySlice_Check(ind):
ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind,
s_len)

10 changes: 1 addition & 9 deletions pandas/_libs/lib.pyx
@@ -17,8 +15,6 @@ from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM,
np.import_array()
np.import_ufunc()

-from libc.stdlib cimport malloc, free

from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
PyList_Check, PyFloat_Check,
PyString_Check,
@@ -27,8 +25,7 @@ from cpython cimport (Py_INCREF, PyTuple_SET_ITEM,
PyTuple_New,
PyObject_RichCompareBool,
PyBytes_GET_SIZE,
-PyUnicode_GET_SIZE,
-PyObject)
+PyUnicode_GET_SIZE)

try:
from cpython cimport PyString_GET_SIZE
@@ -37,17 +34,12 @@ except ImportError:

cimport cpython

-isnan = np.isnan
cdef double NaN = <double> np.NaN
cdef double nan = NaN

from cpython.datetime cimport (PyDateTime_Check, PyDate_Check,
PyTime_Check, PyDelta_Check,
PyDateTime_IMPORT)
PyDateTime_IMPORT

-from tslibs.np_datetime cimport get_timedelta64_value, get_datetime64_value

from tslib import NaT, Timestamp, Timedelta, array_to_datetime
from interval import Interval
from missing cimport checknull
13 changes: 6 additions & 7 deletions pandas/_libs/reduction.pyx
@@ -24,9 +24,9 @@ is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2'

cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt):

-if isinstance(obj, np.ndarray) \
-or isinstance(obj, list) and len(obj) == cnt \
-or getattr(obj, 'shape', None) == (cnt,):
+if (util.is_array(obj) or
+isinstance(obj, list) and len(obj) == cnt or
Review comments on this line:

Contributor: could do PyList_Check here

Contributor: or change the other one below; I find this slightly non-idiomatic

Member (author): It looks like Cython automatically converts isinstance(obj, list) to PyList_Check (see the sketch after this hunk). Agreed on the idiomatic point. There's a lot of built-up debris in _libs.

+getattr(obj, 'shape', None) == (cnt,)):
raise ValueError('function does not reduce')

return np.empty(size, dtype='O')
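Following up on the thread above, a standalone sketch (not from this PR) that can be cythonized to verify the claim; both functions should compile down to the PyList_Check macro in the generated C:

from cpython cimport PyList_Check

def check_via_isinstance(object obj):
    # Cython special-cases isinstance against builtin types and emits
    # PyList_Check(obj) instead of a generic PyObject_IsInstance call
    return isinstance(obj, list)

def check_via_capi(object obj):
    # the explicit C-API spelling suggested by the reviewer
    return PyList_Check(obj) != 0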
@@ -150,8 +150,7 @@ cdef class Reducer:
else:
res = self.f(chunk)

-if hasattr(res, 'values') and isinstance(
-res.values, np.ndarray):
+if hasattr(res, 'values') and util.is_array(res.values):
res = res.values
if i == 0:
result = _get_result_array(res,
@@ -433,10 +432,10 @@ cdef class SeriesGrouper:
cdef inline _extract_result(object res):
""" extract the result object, it might be a 0-dim ndarray
or a len-1 0-dim, or a scalar """
-if hasattr(res, 'values') and isinstance(res.values, np.ndarray):
+if hasattr(res, 'values') and util.is_array(res.values):
res = res.values
if not np.isscalar(res):
-if isinstance(res, np.ndarray):
+if util.is_array(res):
if res.ndim == 0:
res = res.item()
elif res.ndim == 1 and len(res) == 1:
9 changes: 4 additions & 5 deletions pandas/_libs/src/inference.pyx
@@ -10,10 +10,9 @@ from datetime import datetime, timedelta
iNaT = util.get_nat()

cdef bint PY2 = sys.version_info[0] == 2
cdef double nan = <double> np.NaN

-from util cimport (UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX,
-INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX,
-INT32_MAX, INT32_MIN, INT64_MAX, INT64_MIN)
+from util cimport UINT8_MAX, UINT64_MAX, INT64_MAX, INT64_MIN

# core.common import for fast inference checks

@@ -331,7 +330,7 @@ def infer_dtype(object value, bint skipna=False):
bint seen_pdnat = False
bint seen_val = False

-if isinstance(value, np.ndarray):
+if util.is_array(value):
values = value
elif hasattr(value, 'dtype'):

@@ -349,7 +348,7 @@
raise ValueError("cannot infer type for {0}".format(type(value)))

else:
-if not isinstance(value, list):
+if not PyList_Check(value):
value = list(value)
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike)
10 changes: 6 additions & 4 deletions pandas/_libs/tslibs/offsets.pyx
@@ -306,8 +306,8 @@ class _BaseOffset(object):
def __call__(self, other):
return self.apply(other)

-def __mul__(self, someInt):
-return self.__class__(n=someInt * self.n, normalize=self.normalize,
+def __mul__(self, other):
+return self.__class__(n=other * self.n, normalize=self.normalize,
**self.kwds)

def __neg__(self):
@@ -374,8 +374,8 @@

class BaseOffset(_BaseOffset):
# Here we add __rfoo__ methods that don't play well with cdef classes
-def __rmul__(self, someInt):
-return self.__mul__(someInt)
+def __rmul__(self, other):
+return self.__mul__(other)

def __radd__(self, other):
return self.__add__(other)
@@ -840,6 +840,8 @@ cpdef int roll_qtrday(datetime other, int n, int month, object day_opt,
-------
n : int number of periods to increment
"""
+cdef:
+int months_since
# TODO: Merge this with roll_yearday by setting modby=12 there?
# code de-duplication versus perf hit?
# TODO: with small adjustments this could be used in shift_quarters
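On the roll_qtrday hunk above: the added cdef block types months_since as a C int, so the assignments later in the (collapsed) function body compile to C integer operations rather than boxed Python ints. A small illustrative sketch of the pattern, using a hypothetical helper not taken from the PR:

cpdef int months_between(int y1, int m1, int y2, int m2):
    # locals declared in a cdef block are C variables; the arithmetic
    # below is plain C integer math
    cdef:
        int months_since
    months_since = (y2 - y1) * 12 + (m2 - m1)
    return months_since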
17 changes: 0 additions & 17 deletions pandas/core/indexing.py
@@ -1936,10 +1936,6 @@ def _convert_key(self, key, is_setter=False):
return key


-# 32-bit floating point machine epsilon
-_eps = 1.1920929e-07


def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
@@ -1992,19 +1988,6 @@ def convert_to_index_sliceable(obj, key):
return None


-def is_index_slice(obj):
-def _is_valid_index(x):
-return (is_integer(x) or is_float(x) and
-np.allclose(x, int(x), rtol=_eps, atol=0))
-
-def _crit(v):
-return v is None or _is_valid_index(v)
-
-both_none = obj.start is None and obj.stop is None
-
-return not both_none and (_crit(obj.start) and _crit(obj.stop))


def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
4 changes: 2 additions & 2 deletions pandas/core/sparse/series.py
@@ -19,7 +19,7 @@
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
-import pandas._libs.index as _index
+import pandas._libs.index as libindex
from pandas.util._decorators import Appender

from pandas.core.sparse.array import (
@@ -560,7 +560,7 @@ def _set_values(self, key, value):
key = key.values

values = self.values.to_dense()
-values[key] = _index.convert_scalar(values, value)
+values[key] = libindex.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)