diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index a313d6cc33..10c3e5a2ec 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -21,6 +21,7 @@ It also brings some dreadfully awaited fixes, so be sure to go through the chang
 - `plot_posterior_predictive_glm` now works with `arviz.InferenceData` as well (see [#4234](https://github.com/pymc-devs/pymc3/pull/4234))
 - Add `logcdf` method to all univariate discrete distributions (see [#4387](https://github.com/pymc-devs/pymc3/pull/4387)).
 - Add `random` method to `MvGaussianRandomWalk` (see [#4388](https://github.com/pymc-devs/pymc3/pull/4388))
+- `AsymmetricLaplace` distribution added (see [#4392](https://github.com/pymc-devs/pymc3/pull/4392)).
 
 ### Maintenance
 - Fixed bug whereby partial traces returns after keyboard interrupt during parallel sampling had fewer draws than would've been available [#4318](https://github.com/pymc-devs/pymc3/pull/4318)
diff --git a/docs/source/api/distributions/continuous.rst b/docs/source/api/distributions/continuous.rst
index fcc49d2e11..9222dcb797 100644
--- a/docs/source/api/distributions/continuous.rst
+++ b/docs/source/api/distributions/continuous.rst
@@ -16,6 +16,7 @@ Continuous
    Kumaraswamy
    Exponential
    Laplace
+   AsymmetricLaplace
    StudentT
    HalfStudentT
    Cauchy
diff --git a/pymc3/distributions/__init__.py b/pymc3/distributions/__init__.py
index ade7c9ef00..afbdf76b04 100644
--- a/pymc3/distributions/__init__.py
+++ b/pymc3/distributions/__init__.py
@@ -16,6 +16,7 @@
 from pymc3.distributions.bart import BART
 from pymc3.distributions.bound import Bound
 from pymc3.distributions.continuous import (
+    AsymmetricLaplace,
     Beta,
     Cauchy,
     ChiSquared,
@@ -160,6 +161,7 @@
     "LKJCorr",
     "AR1",
     "AR",
+    "AsymmetricLaplace",
     "GaussianRandomWalk",
     "MvGaussianRandomWalk",
     "MvStudentTRandomWalk",
diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py
index 25a97f7688..c60898ab16 100644
--- a/pymc3/distributions/continuous.py
+++ b/pymc3/distributions/continuous.py
@@ -80,6 +80,7 @@
     "Interpolated",
     "Rice",
     "Moyal",
+    "AsymmetricLaplace",
 ]
 
 
@@ -1661,6 +1662,106 @@ def logcdf(self, value):
         )
 
 
+class AsymmetricLaplace(Continuous):
+    r"""
+    Asymmetric-Laplace log-likelihood.
+
+    The pdf of this distribution is
+
+    .. math::
+        {f(x|b,\kappa,\mu) =
+            \left({\frac{b}{\kappa + 1/\kappa}}\right)\,e^{-(x-\mu)b\,s\kappa ^{s}}}
+
+    where
+
+    .. math::
+
+        s = sgn(x-\mu)
+
+    ========  ========================
+    Support   :math:`x \in \mathbb{R}`
+    Mean      :math:`\mu-\frac{\kappa-1/\kappa}{b}`
+    Variance  :math:`\frac{1+\kappa^{4}}{b^2\kappa^2 }`
+    ========  ========================
+
+    Parameters
+    ----------
+    b: float
+        Scale parameter (b > 0)
+    kappa: float
+        Symmetry parameter (kappa > 0)
+    mu: float
+        Location parameter
+
+    See Also
+    --------
+    `Reference <https://en.wikipedia.org/wiki/Asymmetric_Laplace_distribution>`_
+    """
+
+    def __init__(self, b, kappa, mu=0, *args, **kwargs):
+        self.b = tt.as_tensor_variable(floatX(b))
+        self.kappa = tt.as_tensor_variable(floatX(kappa))
+        self.mu = mu = tt.as_tensor_variable(floatX(mu))
+
+        self.mean = self.mu - (self.kappa - 1 / self.kappa) / b
+        self.variance = (1 + self.kappa ** 4) / (self.kappa ** 2 * self.b ** 2)
+
+        assert_negative_support(kappa, "kappa", "AsymmetricLaplace")
+        assert_negative_support(b, "b", "AsymmetricLaplace")
+
+        super().__init__(*args, **kwargs)
+
+    def _random(self, b, kappa, mu, size=None):
+        u = np.random.uniform(size=size)
+        switch = kappa ** 2 / (1 + kappa ** 2)
+        non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b
+        positive_x = mu - np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b)
+        draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
+        return draws
+
+    def random(self, point=None, size=None):
+        """
+        Draw random samples from this distribution, using the inverse CDF method.
+
+        Parameters
+        ----------
+        point: dict, optional
+            Dict of variable values on which random values are to be
+            conditioned (uses default point if not specified).
+        size: int, optional
+            Desired size of random sample (returns one sample if not
+            specified).
+
+        Returns
+        -------
+        array
+        """
+        b, kappa, mu = draw_values([self.b, self.kappa, self.mu], point=point, size=size)
+        return generate_samples(self._random, b, kappa, mu, dist_shape=self.shape, size=size)
+
+    def logp(self, value):
+        """
+        Calculate log-probability of Asymmetric-Laplace distribution at specified value.
+
+        Parameters
+        ----------
+        value: numeric
+            Value(s) for which log-probability is calculated. If the log probabilities for multiple
+            values are desired the values must be provided in a numpy array or theano tensor
+
+        Returns
+        -------
+        TensorVariable
+        """
+        value = value - self.mu
+        return bound(
+            tt.log(self.b / (self.kappa + (self.kappa ** -1)))
+            + (-value * self.b * tt.sgn(value) * (self.kappa ** tt.sgn(value))),
+            0 < self.b,
+            0 < self.kappa,
+        )
+
+
 class Lognormal(PositiveContinuous):
     r"""
     Log-normal log-likelihood.
diff --git a/pymc3/tests/test_distributions.py b/pymc3/tests/test_distributions.py
index a2d00a2d60..c2548423f6 100644
--- a/pymc3/tests/test_distributions.py
+++ b/pymc3/tests/test_distributions.py
@@ -33,6 +33,7 @@
 from pymc3.blocking import DictToVarBijection
 from pymc3.distributions import (
     AR1,
+    AsymmetricLaplace,
     Bernoulli,
     Beta,
     BetaBinomial,
@@ -219,6 +220,14 @@ def build_model(distfam, valuedomain, vardomains, extra_args=None):
     return m
 
 
+def laplace_asymmetric_logpdf(value, kappa, b, mu):
+    kapinv = 1 / kappa
+    value = value - mu
+    lPx = value * b * np.where(value >= 0, -kappa, kapinv)
+    lPx += np.log(b / (kappa + kapinv))
+    return lPx
+
+
 def integrate_nd(f, domain, shape, dtype):
     if shape == () or shape == (1,):
         if dtype in continuous_types:
@@ -987,6 +996,14 @@ def test_laplace(self):
             lambda value, mu, b: sp.laplace.logcdf(value, mu, b),
         )
 
+    def test_laplace_asymmetric(self):
+        self.pymc3_matches_scipy(
+            AsymmetricLaplace,
+            R,
+            {"b": Rplus, "kappa": Rplus, "mu": R},
+            laplace_asymmetric_logpdf,
+        )
+
     def test_lognormal(self):
         self.pymc3_matches_scipy(
             Lognormal,
diff --git a/pymc3/tests/test_distributions_random.py b/pymc3/tests/test_distributions_random.py
index 5e735b6aed..6cc4fe8042 100644
--- a/pymc3/tests/test_distributions_random.py
+++ b/pymc3/tests/test_distributions_random.py
@@ -375,6 +375,11 @@ class TestLaplace(BaseTestCases.BaseTestCase):
     params = {"mu": 1.0, "b": 1.0}
 
 
+class TestAsymmetricLaplace(BaseTestCases.BaseTestCase):
+    distribution = pm.AsymmetricLaplace
+    params = {"kappa": 1.0, "b": 1.0, "mu": 0.0}
+
+
 class TestLognormal(BaseTestCases.BaseTestCase):
     distribution = pm.Lognormal
     params = {"mu": 1.0, "tau": 1.0}
@@ -626,6 +631,17 @@ def ref_rand(size, mu, b):
 
         pymc3_random(pm.Laplace, {"mu": R, "b": Rplus}, ref_rand=ref_rand)
 
+    def test_laplace_asymmetric(self):
+        def ref_rand(size, kappa, b, mu):
+            u = np.random.uniform(size=size)
+            switch = kappa ** 2 / (1 + kappa ** 2)
+            non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b
+            positive_x = mu - np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b)
+            draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
+            return draws
+
+        pymc3_random(pm.AsymmetricLaplace, {"b": Rplus, "kappa": Rplus, "mu": R}, ref_rand=ref_rand)
+
     def test_lognormal(self):
         def ref_rand(size, mu, tau):
             return np.exp(mu + (tau ** -0.5) * st.norm.rvs(loc=0.0, scale=1.0, size=size))