diff --git a/Cargo.toml b/Cargo.toml
index f2177a99a9b88..f10d539d8296b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -42,6 +42,19 @@ debug-assertions = false
 debug = false
 debug-assertions = false
 
+[profile.release.package.compiler_builtins]
+# For compiler-builtins we always use a high number of codegen units.
+# The goal here is to place every single intrinsic into its own object
+# file to avoid symbol clashes with the system libgcc if possible. Note
+# that this number doesn't actually produce this many object files, we
+# just don't create more than this number of object files.
+#
+# It's a bit of a bummer that we have to pass this here, unfortunately.
+# Ideally this would be specified through an env var to Cargo so Cargo
+# knows how many CGUs are for this specific crate, but for now
+# per-crate configuration isn't specifiable in the environment.
+codegen-units = 10000
+
 # We want the RLS to use the version of Cargo that we've got vendored in this
 # repository to ensure that the same exact version of Cargo is used by both the
 # RLS and the Cargo binary itself. The RLS depends on Cargo as a git repository
diff --git a/src/librustc_lexer/src/lib.rs b/src/librustc_lexer/src/lib.rs
index c2139d07f378a..77b3d26463dfe 100644
--- a/src/librustc_lexer/src/lib.rs
+++ b/src/librustc_lexer/src/lib.rs
@@ -29,7 +29,7 @@ mod tests;
 use self::LiteralKind::*;
 use self::TokenKind::*;
 use crate::cursor::{Cursor, EOF_CHAR};
-use std::convert::TryInto;
+use std::convert::TryFrom;
 
 /// Parsed token.
 /// It doesn't contain information about data that has been parsed,
@@ -142,84 +142,24 @@ pub enum LiteralKind {
     /// "b"abc"", "b"abc"
     ByteStr { terminated: bool },
     /// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a"
-    RawStr(UnvalidatedRawStr),
+    RawStr { n_hashes: u16, err: Option<RawStrError> },
     /// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a"
-    RawByteStr(UnvalidatedRawStr),
-}
-
-/// Represents something that looks like a raw string, but may have some
-/// problems. Use `.validate()` to convert it into something
-/// usable.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct UnvalidatedRawStr {
-    /// The prefix (`r###"`) is valid
-    valid_start: bool,
-
-    /// The postfix (`"###`) is valid
-    valid_end: bool,
-
-    /// The number of leading `#`
-    n_start_hashes: usize,
-    /// The number of trailing `#`. `n_end_hashes` <= `n_start_hashes`
-    n_end_hashes: usize,
-    /// The offset starting at `r` or `br` where the user may have intended to end the string.
-    /// Currently, it is the longest sequence of pattern `"#+"`.
-    possible_terminator_offset: Option<usize>,
+    RawByteStr { n_hashes: u16, err: Option<RawStrError> },
 }
 
 /// Error produced validating a raw string. Represents cases like:
-/// - `r##~"abcde"##`: `LexRawStrError::InvalidStarter`
-/// - `r###"abcde"##`: `LexRawStrError::NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11)`
-/// - Too many `#`s (>65536): `TooManyDelimiters`
+/// - `r##~"abcde"##`: `InvalidStarter`
+/// - `r###"abcde"##`: `NoTerminator { expected: 3, found: 2, possible_terminator_offset: Some(11) }`
+/// - Too many `#`s (>65535): `TooManyDelimiters`
 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub enum LexRawStrError {
+pub enum RawStrError {
     /// Non `#` characters exist between `r` and `"` eg. `r#~"..`
-    InvalidStarter,
+    InvalidStarter { bad_char: char },
     /// The string was never terminated. `possible_terminator_offset` is the number of characters after `r` or `br` where they
     /// may have intended to terminate it.
     NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
-    /// More than 65536 `#`s exist.
-    TooManyDelimiters,
-}
-
-/// Raw String that contains a valid prefix (`#+"`) and postfix (`"#+`) where
-/// there are a matching number of `#` characters in both. Note that this will
-/// not consume extra trailing `#` characters: `r###"abcde"####` is lexed as a
-/// `ValidatedRawString { n_hashes: 3 }` followed by a `#` token.
-#[derive(Debug, Eq, PartialEq, Copy, Clone)]
-pub struct ValidatedRawStr {
-    n_hashes: u16,
-}
-
-impl ValidatedRawStr {
-    pub fn num_hashes(&self) -> u16 {
-        self.n_hashes
-    }
-}
-
-impl UnvalidatedRawStr {
-    pub fn validate(self) -> Result<ValidatedRawStr, LexRawStrError> {
-        if !self.valid_start {
-            return Err(LexRawStrError::InvalidStarter);
-        }
-
-        // Only up to 65535 `#`s are allowed in raw strings
-        let n_start_safe: u16 =
-            self.n_start_hashes.try_into().map_err(|_| LexRawStrError::TooManyDelimiters)?;
-
-        if self.n_start_hashes > self.n_end_hashes || !self.valid_end {
-            Err(LexRawStrError::NoTerminator {
-                expected: self.n_start_hashes,
-                found: self.n_end_hashes,
-                possible_terminator_offset: self.possible_terminator_offset,
-            })
-        } else {
-            // Since the lexer should never produce a literal with n_end > n_start, if n_start <= n_end,
-            // they must be equal.
-            debug_assert_eq!(self.n_start_hashes, self.n_end_hashes);
-            Ok(ValidatedRawStr { n_hashes: n_start_safe })
-        }
-    }
+    /// More than 65535 `#`s exist.
+    TooManyDelimiters { found: usize },
 }
 
 /// Base of numeric literal encoding according to its prefix.
@@ -239,21 +179,18 @@ pub enum Base {
 /// but shebang isn't a part of rust syntax.
 pub fn strip_shebang(input: &str) -> Option<usize> {
     // Shebang must start with `#!` literally, without any preceding whitespace.
-    if input.starts_with("#!") {
-        let input_tail = &input[2..];
-        // Shebang must have something non-whitespace after `#!` on the first line.
-        let first_line_tail = input_tail.lines().next()?;
-        if first_line_tail.contains(|c| !is_whitespace(c)) {
-            // Ok, this is a shebang but if the next non-whitespace token is `[` or maybe
-            // a doc comment (due to `TokenKind::(Line,Block)Comment` ambiguity at lexer level),
-            // then it may be valid Rust code, so consider it Rust code.
-            let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).filter(|tok|
-                !matches!(tok, TokenKind::Whitespace | TokenKind::LineComment | TokenKind::BlockComment { .. })
-            ).next();
-            if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
-                // No other choice than to consider this a shebang.
-                return Some(2 + first_line_tail.len());
-            }
-        }
+    // For simplicity we consider any line starting with `#!` a shebang,
+    // regardless of restrictions put on shebangs by specific platforms.
+    if let Some(input_tail) = input.strip_prefix("#!") {
+        // Ok, this is a shebang but if the next non-whitespace token is `[` or maybe
+        // a doc comment (due to `TokenKind::(Line,Block)Comment` ambiguity at lexer level),
+        // then it may be valid Rust code, so consider it Rust code.
+        let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).find(|tok|
+            !matches!(tok, TokenKind::Whitespace | TokenKind::LineComment | TokenKind::BlockComment { .. })
+        );
+        if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
+            // No other choice than to consider this a shebang.
+            return Some(2 + input_tail.lines().next().unwrap_or_default().len());
+        }
     }
     None
 }
@@ -354,12 +291,12 @@ impl Cursor<'_> {
             'r' => match (self.first(), self.second()) {
                 ('#', c1) if is_id_start(c1) => self.raw_ident(),
                 ('#', _) | ('"', _) => {
-                    let raw_str_i = self.raw_double_quoted_string(1);
+                    let (n_hashes, err) = self.raw_double_quoted_string(1);
                     let suffix_start = self.len_consumed();
-                    if raw_str_i.n_end_hashes == raw_str_i.n_start_hashes {
+                    if err.is_none() {
                         self.eat_literal_suffix();
                     }
-                    let kind = RawStr(raw_str_i);
+                    let kind = RawStr { n_hashes, err };
                     Literal { kind, suffix_start }
                 }
                 _ => self.ident(),
@@ -389,14 +326,12 @@ impl Cursor<'_> {
                 }
                 ('r', '"') | ('r', '#') => {
                     self.bump();
-                    let raw_str_i = self.raw_double_quoted_string(2);
+                    let (n_hashes, err) = self.raw_double_quoted_string(2);
                     let suffix_start = self.len_consumed();
-                    let terminated = raw_str_i.n_start_hashes == raw_str_i.n_end_hashes;
-                    if terminated {
+                    if err.is_none() {
                         self.eat_literal_suffix();
                     }
-
-                    let kind = RawByteStr(raw_str_i);
+                    let kind = RawByteStr { n_hashes, err };
                     Literal { kind, suffix_start }
                 }
                 _ => self.ident(),
@@ -692,27 +627,34 @@ impl Cursor<'_> {
         false
     }
 
-    /// Eats the double-quoted string and returns an `UnvalidatedRawStr`.
-    fn raw_double_quoted_string(&mut self, prefix_len: usize) -> UnvalidatedRawStr {
+    /// Eats the double-quoted string and returns `n_hashes` and an error if encountered.
+    fn raw_double_quoted_string(&mut self, prefix_len: usize) -> (u16, Option<RawStrError>) {
+        // Wrap the actual function to handle the error with too many hashes.
+        // This way, it eats the whole raw string.
+        let (n_hashes, err) = self.raw_string_unvalidated(prefix_len);
+        // Only up to 65535 `#`s are allowed in raw strings
+        match u16::try_from(n_hashes) {
+            Ok(num) => (num, err),
+            // We lie about the number of hashes here :P
+            Err(_) => (0, Some(RawStrError::TooManyDelimiters { found: n_hashes })),
+        }
+    }
+
+    fn raw_string_unvalidated(&mut self, prefix_len: usize) -> (usize, Option<RawStrError>) {
         debug_assert!(self.prev() == 'r');
-        let mut valid_start: bool = false;
         let start_pos = self.len_consumed();
-        let (mut possible_terminator_offset, mut max_hashes) = (None, 0);
+        let mut possible_terminator_offset = None;
+        let mut max_hashes = 0;
 
         // Count opening '#' symbols.
         let n_start_hashes = self.eat_while(|c| c == '#');
 
         // Check that string is started.
         match self.bump() {
-            Some('"') => valid_start = true,
-            _ => {
-                return UnvalidatedRawStr {
-                    valid_start,
-                    valid_end: false,
-                    n_start_hashes,
-                    n_end_hashes: 0,
-                    possible_terminator_offset,
-                };
+            Some('"') => (),
+            c => {
+                let c = c.unwrap_or(EOF_CHAR);
+                return (n_start_hashes, Some(RawStrError::InvalidStarter { bad_char: c }));
             }
         }
@@ -722,13 +664,14 @@ impl Cursor<'_> {
             self.eat_while(|c| c != '"');
 
             if self.is_eof() {
-                return UnvalidatedRawStr {
-                    valid_start,
-                    valid_end: false,
+                return (
                     n_start_hashes,
-                    n_end_hashes: max_hashes,
-                    possible_terminator_offset,
-                };
+                    Some(RawStrError::NoTerminator {
+                        expected: n_start_hashes,
+                        found: max_hashes,
+                        possible_terminator_offset,
+                    }),
+                );
             }
 
             // Eat closing double quote.
             self.bump();
@@ -737,7 +680,7 @@ impl Cursor<'_> {
             // Check that amount of closing '#' symbols
             // is equal to the amount of opening ones.
             // Note that this will not consume extra trailing `#` characters:
-            // `r###"abcde"####` is lexed as a `LexedRawString { n_hashes: 3 }`
+            // `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }`
             // followed by a `#` token.
             let mut hashes_left = n_start_hashes;
             let is_closing_hash = |c| {
@@ -751,13 +694,7 @@ impl Cursor<'_> {
             let n_end_hashes = self.eat_while(is_closing_hash);
 
             if n_end_hashes == n_start_hashes {
-                return UnvalidatedRawStr {
-                    valid_start,
-                    valid_end: true,
-                    n_start_hashes,
-                    n_end_hashes,
-                    possible_terminator_offset: None,
-                };
+                return (n_start_hashes, None);
             } else if n_end_hashes > max_hashes {
                 // Keep track of possible terminators to give a hint about
                 // where there might be a missing terminator
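The lexer change above replaces the two-step `UnvalidatedRawStr`/`validate()` dance with a `(n_hashes, err)` pair carried directly on the literal token. A minimal sketch of how a consumer might turn the new error into a message; the enum is restated locally so the example stands alone, and `report` is a hypothetical helper rather than rustc's real diagnostics path:

```rust
// Local mirror of the `RawStrError` enum from the diff above, so this
// sketch compiles on its own.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RawStrError {
    InvalidStarter { bad_char: char },
    NoTerminator { expected: usize, found: usize, possible_terminator_offset: Option<usize> },
    TooManyDelimiters { found: usize },
}

// With the old API a caller had to call `.validate()` on an
// `UnvalidatedRawStr` to learn whether the literal was well-formed;
// with the new shape the error (if any) is already attached to the
// token, so reporting is a plain `match`.
fn report(n_hashes: u16, err: Option<RawStrError>) -> String {
    match err {
        None => format!("valid raw string with {} `#` pair(s)", n_hashes),
        Some(RawStrError::InvalidStarter { bad_char }) => {
            format!("expected `\"` after the opening `#`s, found {:?}", bad_char)
        }
        Some(RawStrError::NoTerminator { expected, found, .. }) => {
            format!("expected {} closing `#`s, found only {}", expected, found)
        }
        Some(RawStrError::TooManyDelimiters { found }) => {
            format!("{} `#`s exceeds the 65535 limit", found)
        }
    }
}

fn main() {
    // `r##"abc"#` is unterminated: 2 closing hashes expected, 1 found.
    let msg = report(
        2,
        Some(RawStrError::NoTerminator {
            expected: 2,
            found: 1,
            possible_terminator_offset: Some(8),
        }),
    );
    println!("{}", msg);
}
```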
diff --git a/src/librustc_middle/ty/relate.rs b/src/librustc_middle/ty/relate.rs
index aeb3a0716fb42..7fb41a1dc5ac2 100644
--- a/src/librustc_middle/ty/relate.rs
+++ b/src/librustc_middle/ty/relate.rs
@@ -617,12 +617,22 @@ impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::ExistentialPredicate<'tcx>> {
         a: &Self,
         b: &Self,
     ) -> RelateResult<'tcx, Self> {
-        if a.len() != b.len() {
+        let tcx = relation.tcx();
+
+        // FIXME: this is wasteful, but want to do a perf run to see how slow it is.
+        // We need to perform this deduplication as we sometimes generate duplicate projections
+        // in `a`.
+        let mut a_v: Vec<_> = a.into_iter().collect();
+        let mut b_v: Vec<_> = b.into_iter().collect();
+        a_v.sort_by(|a, b| a.stable_cmp(tcx, b));
+        a_v.dedup();
+        b_v.sort_by(|a, b| a.stable_cmp(tcx, b));
+        b_v.dedup();
+        if a_v.len() != b_v.len() {
             return Err(TypeError::ExistentialMismatch(expected_found(relation, a, b)));
         }
 
-        let tcx = relation.tcx();
-        let v = a.iter().zip(b.iter()).map(|(ep_a, ep_b)| {
+        let v = a_v.into_iter().zip(b_v.into_iter()).map(|(ep_a, ep_b)| {
             use crate::ty::ExistentialPredicate::*;
             match (ep_a, ep_b) {
                 (Trait(ref a), Trait(ref b)) => Ok(Trait(relation.relate(a, b)?)),
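`Vec::dedup` only removes *adjacent* duplicates, which is why both predicate lists are sorted with the total order `stable_cmp` before deduplicating. A standalone illustration of the pattern, with plain `Ord` on strings standing in for `stable_cmp`:

```rust
// Sort-then-dedup: sorting brings equal elements next to each other,
// so `dedup` can collapse them in one pass.
fn dedup_sorted(mut v: Vec<&str>) -> Vec<&str> {
    v.sort(); // total order, analogous to `stable_cmp(tcx, ..)`
    v.dedup(); // equal elements are now adjacent and collapse to one
    v
}

fn main() {
    // A duplicate projection predicate (think `Service<S = ()>`
    // elaborated twice for one trait object) collapses to one entry,
    // so both sides end up with comparable lengths.
    let preds = vec!["Service<S = ()>", "HttpService", "Service<S = ()>"];
    assert_eq!(dedup_sorted(preds), vec!["HttpService", "Service<S = ()>"]);
    println!("dedup ok");
}
```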
diff --git a/src/librustc_mir/monomorphize/partitioning.rs b/src/librustc_mir/monomorphize/partitioning.rs
index db1ea72c0a531..a945c1d626a9a 100644
--- a/src/librustc_mir/monomorphize/partitioning.rs
+++ b/src/librustc_mir/monomorphize/partitioning.rs
@@ -454,18 +454,11 @@ fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibilit
 fn merge_codegen_units<'tcx>(
     tcx: TyCtxt<'tcx>,
     initial_partitioning: &mut PreInliningPartitioning<'tcx>,
-    mut target_cgu_count: usize,
+    target_cgu_count: usize,
 ) {
     assert!(target_cgu_count >= 1);
     let codegen_units = &mut initial_partitioning.codegen_units;
 
-    if tcx.is_compiler_builtins(LOCAL_CRATE) {
-        // Compiler builtins require some degree of control over how mono items
-        // are partitioned into compilation units. Provide it by keeping the
-        // original partitioning when compiling the compiler builtins crate.
-        target_cgu_count = codegen_units.len();
-    }
-
     // Note that at this point in time the `codegen_units` here may not be in a
     // deterministic order (but we know they're deterministically the same set).
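With this special case gone, compiler-builtins keeps one intrinsic per object file purely through the `codegen-units = 10000` profile override in Cargo.toml above, and `merge_codegen_units` treats it like any other crate. A toy model of the merging loop; `merge_to_target` is hypothetical, and the real code merges by estimated size rather than item count, so this only shows how unit counts converge to the requested `-Ccodegen-units` target:

```rust
// Simplified model: repeatedly fold the smallest codegen unit into the
// next-smallest until only `target` units remain.
fn merge_to_target(mut cgus: Vec<Vec<&'static str>>, target: usize) -> Vec<Vec<&'static str>> {
    assert!(target >= 1);
    while cgus.len() > target {
        cgus.sort_by_key(|c| c.len()); // smallest first
        let smallest = cgus.remove(0);
        cgus[0].extend(smallest); // fold into the next-smallest unit
    }
    cgus
}

fn main() {
    // Four single-item units (one intrinsic each, as the deleted
    // codegen-units test modelled) merged down to -Ccodegen-units=2.
    let cgus = vec![vec!["sync_1"], vec!["sync_2"], vec!["x"], vec!["y"]];
    assert_eq!(merge_to_target(cgus, 2).len(), 2);
}
```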
diff --git a/src/librustdoc/html/static/main.js b/src/librustdoc/html/static/main.js
index ac5a2f96b26c6..596c19fb0a057 100644
--- a/src/librustdoc/html/static/main.js
+++ b/src/librustdoc/html/static/main.js
@@ -1006,12 +1006,13 @@ function defocusSearchBar() {
         var aliases = [];
         var crateAliases = [];
         var i;
-        if (filterCrates !== undefined &&
-                ALIASES[filterCrates] &&
-                ALIASES[filterCrates][query.search]) {
-            for (i = 0; i < ALIASES[crate][query.search].length; ++i) {
-                aliases.push(
-                    createAliasFromItem(searchIndex[ALIASES[filterCrates][query.search]]));
+        if (filterCrates !== undefined) {
+            if (ALIASES[filterCrates] && ALIASES[filterCrates][query.search]) {
+                for (i = 0; i < ALIASES[filterCrates][query.search].length; ++i) {
+                    aliases.push(
+                        createAliasFromItem(
+                            searchIndex[ALIASES[filterCrates][query.search][i]]));
+                }
             }
         } else {
             Object.keys(ALIASES).forEach(function(crate) {
diff --git a/src/test/codegen-units/partitioning/compiler-builtins.rs b/src/test/codegen-units/partitioning/compiler-builtins.rs
deleted file mode 100644
index 25195743b0400..0000000000000
--- a/src/test/codegen-units/partitioning/compiler-builtins.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-// Verifies that during compiler_builtins compilation the codegen units are kept
-// unmerged. Even when only a single codegen unit is requested with -Ccodegen-units=1.
-//
-// compile-flags: -Zprint-mono-items=eager -Ccodegen-units=1
-
-#![compiler_builtins]
-#![crate_type="lib"]
-#![feature(compiler_builtins)]
-
-mod atomics {
-    //~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_1[0] @@ compiler_builtins-cgu.0[External]
-    #[no_mangle]
-    pub extern "C" fn sync_1() {}
-
-    //~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_2[0] @@ compiler_builtins-cgu.0[External]
-    #[no_mangle]
-    pub extern "C" fn sync_2() {}
-
-    //~ MONO_ITEM fn compiler_builtins::atomics[0]::sync_3[0] @@ compiler_builtins-cgu.0[External]
-    #[no_mangle]
-    pub extern "C" fn sync_3() {}
-}
-
-mod x {
-    //~ MONO_ITEM fn compiler_builtins::x[0]::x[0] @@ compiler_builtins-cgu.1[External]
-    #[no_mangle]
-    pub extern "C" fn x() {}
-}
-
-mod y {
-    //~ MONO_ITEM fn compiler_builtins::y[0]::y[0] @@ compiler_builtins-cgu.2[External]
-    #[no_mangle]
-    pub extern "C" fn y() {}
-}
-
-mod z {
-    //~ MONO_ITEM fn compiler_builtins::z[0]::z[0] @@ compiler_builtins-cgu.3[External]
-    #[no_mangle]
-    pub extern "C" fn z() {}
-}
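The main.js change fixes two bugs in the crate-filtered branch: it indexed `ALIASES` with a stale `crate` variable instead of `filterCrates`, and it pushed the same unindexed entry on every loop iteration (the missing `[i]`). A Rust rendering of the corrected lookup, with hypothetical names and types, since the real code is the JavaScript above:

```rust
use std::collections::HashMap;

// Aliases are keyed by crate name, then by query string, and map to
// *lists* of indices into the search index. The fix amounts to reading
// the right map (`filter_crate`, not a stale variable) and indexing
// each element individually.
fn alias_matches<'a>(
    aliases: &HashMap<&str, HashMap<&str, Vec<usize>>>,
    search_index: &'a [&'a str],
    filter_crate: &str,
    query: &str,
) -> Vec<&'a str> {
    let mut out = Vec::new();
    if let Some(per_crate) = aliases.get(filter_crate) {
        if let Some(indices) = per_crate.get(query) {
            for &i in indices {
                out.push(search_index[i]); // note the per-element index
            }
        }
    }
    out
}

fn main() {
    let search_index = ["Foo", "Bar"];
    let mut per_crate = HashMap::new();
    per_crate.insert("true", vec![0]);
    let mut aliases = HashMap::new();
    aliases.insert("doc_alias_filter", per_crate);

    // Matching crate filter: the alias resolves to `Foo`.
    assert_eq!(alias_matches(&aliases, &search_index, "doc_alias_filter", "true"), vec!["Foo"]);
    // Non-matching crate filter: no results, as the tests below expect.
    assert!(alias_matches(&aliases, &search_index, "some_other_crate", "true").is_empty());
}
```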
diff --git a/src/test/rustdoc-js/doc-alias-filter-out.js b/src/test/rustdoc-js/doc-alias-filter-out.js
new file mode 100644
index 0000000000000..46a089d06ebef
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter-out.js
@@ -0,0 +1,9 @@
+// exact-check
+
+const QUERY = 'true';
+
+const FILTER_CRATE = 'some_other_crate';
+
+const EXPECTED = {
+    'others': [],
+};
diff --git a/src/test/rustdoc-js/doc-alias-filter-out.rs b/src/test/rustdoc-js/doc-alias-filter-out.rs
new file mode 100644
index 0000000000000..815e8cedd16da
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter-out.rs
@@ -0,0 +1,4 @@
+#![feature(doc_alias)]
+
+#[doc(alias = "true")]
+pub struct Foo;
diff --git a/src/test/rustdoc-js/doc-alias-filter.js b/src/test/rustdoc-js/doc-alias-filter.js
new file mode 100644
index 0000000000000..4b1e2e2970479
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter.js
@@ -0,0 +1,17 @@
+// exact-check
+
+const QUERY = 'true';
+
+const FILTER_CRATE = 'doc_alias_filter';
+
+const EXPECTED = {
+    'others': [
+        {
+            'path': 'doc_alias_filter',
+            'name': 'Foo',
+            'alias': 'true',
+            'href': '../doc_alias_filter/struct.Foo.html',
+            'is_alias': true
+        },
+    ],
+};
diff --git a/src/test/rustdoc-js/doc-alias-filter.rs b/src/test/rustdoc-js/doc-alias-filter.rs
new file mode 100644
index 0000000000000..8887f8c2b0149
--- /dev/null
+++ b/src/test/rustdoc-js/doc-alias-filter.rs
@@ -0,0 +1,7 @@
+#![feature(doc_alias)]
+
+#[doc(alias = "true")]
+pub struct Foo;
+
+#[doc(alias = "false")]
+pub struct Bar;
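The `doc-alias-filter-out` test is only meaningful if an empty `others` array asserts *zero* results, which is exactly what the `exact-check` tightening in tester.js below provides. A sketch of that length rule in Rust, with a hypothetical `exact_check` helper mirroring the JS comparison:

```rust
// Under exact-check, the expected list must match the result list in
// length, not merely be a subset of it.
fn exact_check(expected: &[&str], results: &[&str]) -> Result<(), String> {
    if expected.len() != results.len() {
        return Err(format!(
            "Expected exactly {} results but found {}",
            expected.len(),
            results.len()
        ));
    }
    Ok(())
}

fn main() {
    // `doc-alias-filter-out` expects zero results when filtering on
    // another crate; a single stray result must fail the test.
    assert!(exact_check(&[], &["Foo"]).is_err());
    assert!(exact_check(&["Foo"], &["Foo"]).is_ok());
}
```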
diff --git a/src/test/ui/issues/issue-59326.rs b/src/test/ui/issues/issue-59326.rs
new file mode 100644
index 0000000000000..c0e8837749eb4
--- /dev/null
+++ b/src/test/ui/issues/issue-59326.rs
@@ -0,0 +1,26 @@
+// check-pass
+trait Service {
+    type S;
+}
+
+trait Framing {
+    type F;
+}
+
+impl Framing for () {
+    type F = ();
+}
+
+trait HttpService<F: Framing>: Service<S = F::F> {}
+
+type BoxService = Box<dyn HttpService<()>>;
+
+fn build_server<F: FnOnce() -> BoxService>(_: F) {}
+
+fn make_server() -> Box<dyn HttpService<()>> {
+    unimplemented!()
+}
+
+fn main() {
+    build_server(|| make_server())
+}
diff --git a/src/test/ui/parser/shebang/shebang-empty.rs b/src/test/ui/parser/shebang/shebang-empty.rs
new file mode 100644
index 0000000000000..e38cc637e945e
--- /dev/null
+++ b/src/test/ui/parser/shebang/shebang-empty.rs
@@ -0,0 +1,4 @@
+#!
+
+// check-pass
+fn main() {}
diff --git a/src/test/ui/parser/shebang/shebang-space.rs b/src/test/ui/parser/shebang/shebang-space.rs
new file mode 100644
index 0000000000000..0978b759d2a6e
--- /dev/null
+++ b/src/test/ui/parser/shebang/shebang-space.rs
@@ -0,0 +1,5 @@
+#! 
+
+// check-pass
+// ignore-tidy-end-whitespace
+fn main() {}
diff --git a/src/test/ui/shebang.rs b/src/test/ui/shebang.rs
deleted file mode 100644
index 3d3ba468be955..0000000000000
--- a/src/test/ui/shebang.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env rustx
-
-// run-pass
-
-pub fn main() { println!("Hello World"); }
diff --git a/src/tools/rustdoc-js/tester.js b/src/tools/rustdoc-js/tester.js
index 163571bc5b988..139e6f73f4216 100644
--- a/src/tools/rustdoc-js/tester.js
+++ b/src/tools/rustdoc-js/tester.js
@@ -269,6 +269,12 @@ function runSearch(query, expected, index, loaded, loadedFile, queryName) {
             break;
         }
         var entry = expected[key];
+
+        if (exact_check == true && entry.length !== results[key].length) {
+            error_text.push(queryName + "==> Expected exactly " + entry.length +
+                " results but found " + results[key].length + " in '" + key + "'");
+        }
+
         var prev_pos = -1;
         for (var i = 0; i < entry.length; ++i) {
             var entry_pos = lookForEntry(entry[i], results[key]);
@@ -307,8 +313,11 @@ function checkResult(error_text, loadedFile, displaySuccess) {
 }
 
 function runChecks(testFile, loaded, index) {
-    var loadedFile = loadContent(
-        readFile(testFile) + 'exports.QUERY = QUERY;exports.EXPECTED = EXPECTED;');
+    var testFileContent = readFile(testFile) + 'exports.QUERY = QUERY;exports.EXPECTED = EXPECTED;';
+    if (testFileContent.indexOf("FILTER_CRATE") !== -1) {
+        testFileContent += "exports.FILTER_CRATE = FILTER_CRATE;";
+    }
+    var loadedFile = loadContent(testFileContent);
 
     const expected = loadedFile.EXPECTED;
     const query = loadedFile.QUERY;
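For context on the issue-59326 regression test: assuming the analysis behind the relate.rs change above, `dyn HttpService<()>` elaborates its supertrait bound into a `Service<S = ()>` projection, and the same projection could be recorded twice on one side when two such trait-object types were related, tripping the length check. A cut-down shape of the scenario that must now type-check; `assert_same_type` is a hypothetical helper, not part of the test suite:

```rust
trait Service { type S; }
trait Framing { type F; }
impl Framing for () { type F = (); }

// The supertrait bound is what produces the extra projection predicate
// when the trait object's predicate list is elaborated.
trait HttpService<F: Framing>: Service<S = F::F> {}

// Forcing two independently-built types to unify exercises the same
// relate path as `build_server(|| make_server())` in the test above.
fn assert_same_type<T>(_: &T, _: &T) {}

fn main() {
    let a: Option<Box<dyn HttpService<()>>> = None;
    let b: Option<Box<dyn HttpService<()>>> = None;
    assert_same_type(&a, &b);
}
```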