--- a/inst/csaps_sel.m
+++ b/inst/csaps_sel.m
@@ -22,7 +22,18 @@
 ##
 ## The chosen cubic spline with natural boundary conditions @var{pp}(@var{x}) minimizes @var{p} Sum_i @var{w}_i*(@var{y}_i - @var{pp}(@var{x}_i))^2  +  (1-@var{p}) Int @var{pp}''(@var{x}) d@var{x}.
 ##
-## A selection criterion @var{crit} is used to find a suitable value for @var{p} (between 0 and 1); possible values for @var{crit} are  `vm' (Vapnik's measure [Cherkassky and Mulier 2007] from statistical learning theory); `aicc' (corrected Akaike information criterion, the default); `aic' (original Akaike information criterion); `gcv' (generalized cross validation). If @var{crit} is a scalar instead of a string, then @var{p} is chosen to so that the mean square scaled residual Mean_i (@var{w}_i*(@var{y}_i - @var{pp}(@var{x}_i))^2) is approximately equal to @var{crit}.
+## A selection criterion @var{crit} is used to find a suitable value for @var{p} (between 0 and 1); possible values for @var{crit} are `vm' (Vapnik's measure [Cherkassky and Mulier 2007] from statistical learning theory); `aicc' (corrected Akaike information criterion, the default); `aic' (original Akaike information criterion); `gcv' (generalized cross validation).
+##
+## If @var{crit} is a nonnegative scalar instead of a string, then @var{p} is chosen so that the mean square scaled residual Mean_i (@var{w}_i*(@var{y}_i - @var{pp}(@var{x}_i))^2) is approximately equal to @var{crit}. If @var{crit} is a negative scalar, then @var{p} is chosen so that the effective number of degrees of freedom in the spline fit (which ranges from 2 when @var{p} = 0 to @var{n} when @var{p} = 1) is approximately equal to -@var{crit}.
 ##
 ## @var{x} and @var{w} should be @var{n} by 1 in size; @var{y} should be @var{n} by @var{m}; @var{xi} should be @var{k} by 1; the values in @var{x} should be distinct and in ascending order; the values in @var{w} should be nonzero.
 ##
@@ -83,53 +94,63 @@
   end
 
   if isscalar(crit)
-    if crit <= 0 #return an exact cubic spline interpolation
+    if crit == 0 || crit <= -n #target residual of 0, or target of at least n degrees of freedom: return an exact cubic spline interpolation
         [ret,p]=csaps(x,y,1,xi,w);
         sigma2 = 0; unc_y = zeros(size(x));
         return
-      end
-    w = w ./ crit; #adjust the sample weights so that the target mean square scaled residual is 1
-    crit = 'msr_bound';
+    elseif crit > 0
+      w = w ./ crit; #adjust the sample weights so that the target mean square scaled residual is 1
+      crit = 'msr_bound';
+    else #negative value -- target degrees of freedom of fit
+      if crit >= -2 #return linear regression
+        p = 0;
+      else
+        global df_target
+        df_target = -crit;
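+        #df_target is read back by the df_bound criterion function defined below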
+        crit = 'df_bound';
+      endif
+    endif
   end	
 
   if isempty(crit)
     crit = 'aicc';
   end
 
-  #R = spdiags([h(1:end-1) 2*(h(1:end-1) + h(2:end)) h(2:end)], [-1 0 1], n-2, n-2);
   R = spdiags([h(2:end) 2*(h(1:end-1) + h(2:end)) h(1:end-1)], [-1 0 1], n-2, n-2);
 
   QT = spdiags([1 ./ h(1:end-1) -(1 ./ h(1:end-1) + 1 ./ h(2:end)) 1 ./ h(2:end)], [0 1 2], n-2, n);
 
 
-chol_method = (n > 300); #use a sparse Cholesky decomposition followed by solving for only the central bands of the inverse to solve for large n (faster), and singular value decomposition for small n (less prone to numerical error if data values are spaced close together)
-
-##choose p by minimizing the penalty function
-  
-if chol_method
-  penalty_function = @(p) penalty_compute_chol(p, QT, R, y, w, n, crit);
-else
-  ##determine influence matrix for different p without repeated inversion
-  [U D V] = svd(diag(1 ./ sqrt(w))*QT'*sqrtm(inv(R)), 0); D = diag(D).^2;
-  penalty_function = @(p) penalty_compute(p, U, D, y, w, n, crit);
-end
-
-  p = fminbnd(penalty_function, 0, 1);
-
-## estimate the trend uncertainty
-if chol_method
-  [MSR, Ht] = penalty_terms_chol(p, QT, R, y, w, n);
-  Hd = influence_matrix_diag_chol(p, QT, R, y, w, n);
-else
-  H = influence_matrix(p, U, D, n, w);
-  [MSR, Ht] = penalty_terms(H, y, w);
-  Hd = diag(H);
-end
+  chol_method = (n > 300); #for large n, use a sparse Cholesky decomposition and solve for only the central bands of the inverse (faster); for small n, use a singular value decomposition (less prone to numerical error when data values are closely spaced)
+
+  if chol_method
+    penalty_function = @(p) penalty_compute_chol(p, QT, R, y, w, n, crit);
+  else
+    ##determine influence matrix for different p without repeated inversion
+    [U D V] = svd(diag(1 ./ sqrt(w))*QT'*sqrtm(inv(R)), 0); D = diag(D).^2;
+    penalty_function = @(p) penalty_compute(p, U, D, y, w, n, crit);
+  end
+
+  if ~exist("p", "var") #p was already fixed above only if a linear fit was requested
+    ##choose p by minimizing the penalty function
+    p = fminbnd(penalty_function, 0, 1);
+  endif
+
+  ## estimate the trend uncertainty
+  if chol_method
+    [MSR, Ht] = penalty_terms_chol(p, QT, R, y, w, n);
+    Hd = influence_matrix_diag_chol(p, QT, R, y, w, n);
+  else
+    H = influence_matrix(p, U, D, n, w);
+    [MSR, Ht] = penalty_terms(H, y, w);
+    Hd = diag(H);
+  end
 
   sigma2 = mean(MSR(:)) * (n / (n-Ht)); #estimated data error variance (wahba83)
   unc_y = sqrt(sigma2 * Hd ./ w); #uncertainty (SD) of fitted curve at each input x-value (hutchinson86)
 
-## construct the fitted smoothing spline 
+  ## construct the fitted smoothing spline 
   [ret,p]=csaps(x,y,p,xi,w);
 
 endfunction
@@ -194,6 +215,12 @@
 
 function J = msr_bound(MSR, Ht, n)
   J = mean(MSR(:) - 1) .^ 2;
+endfunction
+
+function J = df_bound(MSR, Ht, n)
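+  #Ht is the trace of the influence matrix, i.e. the effective number of degrees of freedom of the fit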
+  global df_target
+  J = (Ht - df_target) .^ 2;
 endfunction
 
 function J = penalty_compute(p, U, D, y, w, n, crit) #evaluates a user-supplied penalty function crit at given p