* Fix the intermittent TestGPGGit failures Reattempt to open the listener if the port is busy with a delay up to a second Switch from generating a private key each time, just use a known good keytags/v1.21.12.1
| @@ -93,12 +93,12 @@ require ( | |||||
| github.com/unknwon/paginater v0.0.0-20151104151617-7748a72e0141 | github.com/unknwon/paginater v0.0.0-20151104151617-7748a72e0141 | ||||
| github.com/urfave/cli v1.20.0 | github.com/urfave/cli v1.20.0 | ||||
| github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 | github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 | ||||
| golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f | |||||
| golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 | |||||
| golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9 | golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9 | ||||
| golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 | ||||
| golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 | golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 | ||||
| golang.org/x/text v0.3.2 | golang.org/x/text v0.3.2 | ||||
| golang.org/x/tools v0.0.0-20190910221609-7f5965fd7709 // indirect | |||||
| golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935 // indirect | |||||
| gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect | gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect | ||||
| gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect | gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect | ||||
| gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df | gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df | ||||
| @@ -596,8 +596,9 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U | |||||
| golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= | golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= | ||||
| golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f h1:kz4KIr+xcPUsI3VMoqWfPMvtnJ6MGfiVwsWSVzphMO4= | |||||
| golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | |||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | |||||
| golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= | |||||
| golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | |||||
| golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= | ||||
| golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= | golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= | ||||
| golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= | golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= | ||||
| @@ -612,6 +613,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU | |||||
| golang.org/x/mobile v0.0.0-20190814143026-e8b3e6111d02/go.mod h1:z5wpDCy2wbnXyFdvEuY3LhY9gBUL86/IOILm+Hsjx+E= | golang.org/x/mobile v0.0.0-20190814143026-e8b3e6111d02/go.mod h1:z5wpDCy2wbnXyFdvEuY3LhY9gBUL86/IOILm+Hsjx+E= | ||||
| golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= | golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= | ||||
| golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= | golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= | ||||
| golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= | |||||
| golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| @@ -701,9 +703,10 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw | |||||
| golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= | golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= | ||||
| golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | ||||
| golang.org/x/tools v0.0.0-20190820033707-85edb9ef3283/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | golang.org/x/tools v0.0.0-20190820033707-85edb9ef3283/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | ||||
| golang.org/x/tools v0.0.0-20190910221609-7f5965fd7709 h1:2Ep+/X9v6ij0U1YP++QCLyZgWQHUwVJZkC6tSrH1Iuw= | |||||
| golang.org/x/tools v0.0.0-20190910221609-7f5965fd7709/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||||
| golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935 h1:kJQZhwFzSwJS2BxboKjdZzWczQOZx8VuH7Y8hhuGUtM= | |||||
| golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= | |||||
| golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||||
| google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= | google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= | ||||
| google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= | google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= | ||||
| google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= | google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= | ||||
| @@ -87,6 +87,12 @@ func onGiteaRun(t *testing.T, callback func(*testing.T, *url.URL), prepare ...bo | |||||
| u, err := url.Parse(setting.AppURL) | u, err := url.Parse(setting.AppURL) | ||||
| assert.NoError(t, err) | assert.NoError(t, err) | ||||
| listener, err := net.Listen("tcp", u.Host) | listener, err := net.Listen("tcp", u.Host) | ||||
| i := 0 | |||||
| for err != nil && i <= 10 { | |||||
| time.Sleep(100 * time.Millisecond) | |||||
| listener, err = net.Listen("tcp", u.Host) | |||||
| i++ | |||||
| } | |||||
| assert.NoError(t, err) | assert.NoError(t, err) | ||||
| u.Host = listener.Addr().String() | u.Host = listener.Addr().String() | ||||
| @@ -10,7 +10,6 @@ import ( | |||||
| "io/ioutil" | "io/ioutil" | ||||
| "net/url" | "net/url" | ||||
| "os" | "os" | ||||
| "path/filepath" | |||||
| "testing" | "testing" | ||||
| "code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
| @@ -40,8 +39,11 @@ func TestGPGGit(t *testing.T) { | |||||
| defer os.Setenv("GNUPGHOME", oldGNUPGHome) | defer os.Setenv("GNUPGHOME", oldGNUPGHome) | ||||
| // Need to create a root key | // Need to create a root key | ||||
| rootKeyPair, err := createGPGKey(tmpDir, "gitea", "gitea@fake.local") | |||||
| rootKeyPair, err := importTestingKey(tmpDir, "gitea", "gitea@fake.local") | |||||
| assert.NoError(t, err) | assert.NoError(t, err) | ||||
| if err != nil { | |||||
| assert.FailNow(t, "Unable to import rootKeyPair") | |||||
| } | |||||
| rootKeyID := rootKeyPair.PrimaryKey.KeyIdShortString() | rootKeyID := rootKeyPair.PrimaryKey.KeyIdShortString() | ||||
| @@ -125,6 +127,11 @@ func TestGPGGit(t *testing.T) { | |||||
| testCtx := NewAPITestContext(t, username, "initial-unsigned") | testCtx := NewAPITestContext(t, username, "initial-unsigned") | ||||
| t.Run("CreateCRUDFile-Always", crudActionCreateFile( | t.Run("CreateCRUDFile-Always", crudActionCreateFile( | ||||
| t, testCtx, user, "master", "always", "signed-always.txt", func(t *testing.T, response api.FileResponse) { | t, testCtx, user, "master", "always", "signed-always.txt", func(t *testing.T, response api.FileResponse) { | ||||
| assert.NotNil(t, response.Verification) | |||||
| if response.Verification == nil { | |||||
| assert.FailNow(t, "no verification provided with response! %v", response) | |||||
| return | |||||
| } | |||||
| assert.True(t, response.Verification.Verified) | assert.True(t, response.Verification.Verified) | ||||
| if !response.Verification.Verified { | if !response.Verification.Verified { | ||||
| t.FailNow() | t.FailNow() | ||||
| @@ -134,6 +141,11 @@ func TestGPGGit(t *testing.T) { | |||||
| })) | })) | ||||
| t.Run("CreateCRUDFile-ParentSigned-always", crudActionCreateFile( | t.Run("CreateCRUDFile-ParentSigned-always", crudActionCreateFile( | ||||
| t, testCtx, user, "parentsigned", "parentsigned-always", "signed-parent2.txt", func(t *testing.T, response api.FileResponse) { | t, testCtx, user, "parentsigned", "parentsigned-always", "signed-parent2.txt", func(t *testing.T, response api.FileResponse) { | ||||
| assert.NotNil(t, response.Verification) | |||||
| if response.Verification == nil { | |||||
| assert.FailNow(t, "no verification provided with response! %v", response) | |||||
| return | |||||
| } | |||||
| assert.True(t, response.Verification.Verified) | assert.True(t, response.Verification.Verified) | ||||
| if !response.Verification.Verified { | if !response.Verification.Verified { | ||||
| t.FailNow() | t.FailNow() | ||||
| @@ -152,6 +164,11 @@ func TestGPGGit(t *testing.T) { | |||||
| testCtx := NewAPITestContext(t, username, "initial-unsigned") | testCtx := NewAPITestContext(t, username, "initial-unsigned") | ||||
| t.Run("CreateCRUDFile-Always-ParentSigned", crudActionCreateFile( | t.Run("CreateCRUDFile-Always-ParentSigned", crudActionCreateFile( | ||||
| t, testCtx, user, "always", "always-parentsigned", "signed-always-parentsigned.txt", func(t *testing.T, response api.FileResponse) { | t, testCtx, user, "always", "always-parentsigned", "signed-always-parentsigned.txt", func(t *testing.T, response api.FileResponse) { | ||||
| assert.NotNil(t, response.Verification) | |||||
| if response.Verification == nil { | |||||
| assert.FailNow(t, "no verification provided with response! %v", response) | |||||
| return | |||||
| } | |||||
| assert.True(t, response.Verification.Verified) | assert.True(t, response.Verification.Verified) | ||||
| if !response.Verification.Verified { | if !response.Verification.Verified { | ||||
| t.FailNow() | t.FailNow() | ||||
| @@ -171,7 +188,15 @@ func TestGPGGit(t *testing.T) { | |||||
| t.Run("CreateRepository", doAPICreateRepository(testCtx, false)) | t.Run("CreateRepository", doAPICreateRepository(testCtx, false)) | ||||
| t.Run("CheckMasterBranchSigned", doAPIGetBranch(testCtx, "master", func(t *testing.T, branch api.Branch) { | t.Run("CheckMasterBranchSigned", doAPIGetBranch(testCtx, "master", func(t *testing.T, branch api.Branch) { | ||||
| assert.NotNil(t, branch.Commit) | assert.NotNil(t, branch.Commit) | ||||
| if branch.Commit == nil { | |||||
| assert.FailNow(t, "no commit provided with branch! %v", branch) | |||||
| return | |||||
| } | |||||
| assert.NotNil(t, branch.Commit.Verification) | assert.NotNil(t, branch.Commit.Verification) | ||||
| if branch.Commit.Verification == nil { | |||||
| assert.FailNow(t, "no verification provided with branch commit! %v", branch.Commit) | |||||
| return | |||||
| } | |||||
| assert.True(t, branch.Commit.Verification.Verified) | assert.True(t, branch.Commit.Verification.Verified) | ||||
| if !branch.Commit.Verification.Verified { | if !branch.Commit.Verification.Verified { | ||||
| t.FailNow() | t.FailNow() | ||||
| @@ -318,43 +343,26 @@ func crudActionCreateFile(t *testing.T, ctx APITestContext, user *models.User, f | |||||
| }, callback...) | }, callback...) | ||||
| } | } | ||||
| func createGPGKey(tmpDir, name, email string) (*openpgp.Entity, error) { | |||||
| keyPair, err := openpgp.NewEntity(name, "test", email, nil) | |||||
| if err != nil { | |||||
| func importTestingKey(tmpDir, name, email string) (*openpgp.Entity, error) { | |||||
| if _, _, err := process.GetManager().Exec("gpg --import integrations/private-testing.key", "gpg", "--import", "integrations/private-testing.key"); err != nil { | |||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| for _, id := range keyPair.Identities { | |||||
| err := id.SelfSignature.SignUserId(id.UserId.Id, keyPair.PrimaryKey, keyPair.PrivateKey, nil) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| keyFile := filepath.Join(tmpDir, "temporary.key") | |||||
| keyWriter, err := os.Create(keyFile) | |||||
| keyringFile, err := os.Open("integrations/private-testing.key") | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| defer keyWriter.Close() | |||||
| defer os.Remove(keyFile) | |||||
| defer keyringFile.Close() | |||||
| w, err := armor.Encode(keyWriter, openpgp.PrivateKeyType, nil) | |||||
| block, err := armor.Decode(keyringFile) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| defer w.Close() | |||||
| keyPair.SerializePrivate(w, nil) | |||||
| if err := w.Close(); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if err := keyWriter.Close(); err != nil { | |||||
| return nil, err | |||||
| keyring, err := openpgp.ReadKeyRing(block.Body) | |||||
| if err != nil { | |||||
| return nil, fmt.Errorf("Keyring access failed: '%v'", err) | |||||
| } | } | ||||
| if _, _, err := process.GetManager().Exec("gpg --import temporary.key", "gpg", "--import", keyFile); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return keyPair, nil | |||||
| // There should only be one entity in this file. | |||||
| return keyring[0], nil | |||||
| } | } | ||||
| @@ -0,0 +1,81 @@ | |||||
| -----BEGIN PGP PRIVATE KEY BLOCK----- | |||||
| lQVYBF3190cBDADpfh1CvNtLl8N6lJQ03jymeE8h2ocvT61stAj31eefiOvZPDyx | |||||
| n0kRztBcrQ1OITBEslYqKiYHVfVptscIWf+5bARoHnLQIgpJEio00ggqO3AP8lIZ | |||||
| uiy9/ARDVgyPl3WgMza/J3Z7W0sBJTtN/6W35i+eNEY4Q0mScmNIVc75oo5ey/pL | |||||
| JSF0qumiy94o38dy6Vk2dPEf5LyxJWoA4dLvj49LL/QGPqnsAXAQXr1zULaRuwf2 | |||||
| a8oKebr3m9wVkZcaH86duUmkU822k6OZSSxWCxZFFGVkC/VaA0uUjIL4a6SQqOOr | |||||
| PPOttmCLXJrtvpFdRwJ4PNwr9r2nPDDLwoajLJ8zo5jRumzwvkK9vALkY0BPuVoC | |||||
| qFdGY+2SGYKa4FTE7Mri/5j0NhWplhbcdGRe5doGDxJHbIN2UO21XjDlTBcscKep | |||||
| mWPVE2cdJrspYaZ9O0L6vhMVwGyyk+Qxmf6NbDw0q63AtqVe9qwbr2O3Irxseftw | |||||
| uWuSuLXzp+SMh/0AEQEAAQAL/2ExopuDwuNSHsh5bcIeGnAPV51Xentqtt2viaYk | |||||
| 0AB8PfTVGsyzafa0OM7DKG0z6oRGGhD+L4tRMFGbiHlFAWqdeK4gsplJ+i8VlSUc | |||||
| otJ1oH262IsmEPban6mp+ZuSKCASAYGLu0m5JF0rMucSeli1RHAeAXbtJ4SDAin7 | |||||
| sib/EDWMwjkikS0f8hZWt7kbAcqnMQA2qKKmlBdHZDtOxX/8KeFZ6kHpNtFrfcsK | |||||
| rOECIaVDDhr5HobCyl3E7tW5nrlrvSUkLVFl0IjcypqfzDlZp04PMdswhkdfBhu+ | |||||
| 0iY4K+d4uMPMzcpF1+mcn8C+7XK7jOqZysQa42bqgFHWEqljjJiUCuXfHbxnZWls | |||||
| 0R2j9FLgTqtPQ33f3zMjhOyvdiy1DmfzU9MSu/I0VqCJnq6AwlW5rBQaKwAQuHMB | |||||
| UJ7bjMx/z41z41v0IFpxHnwSa+tkl49tV+y8zVtajfwXxJNy8j/ElX0ywfM5sDHa | |||||
| RAVwI7DSwMk5azp3F15DnA6XbwYA8O0b5AIeCo8edmIdKgY3vAi20j/lsTgsTUkY | |||||
| GTQ4BdMohr9gpZWHZZmQ1TeZokm4Auex7UgPblflufepkADassXixMmSNUsggGI+ | |||||
| sR9qydNCw+qzgaJjchpwT5TdLJNHRbE+6VuGXJftcjdfXiKYZltEQBX8U4w7hui8 | |||||
| D6dpzJK5mE1QebrFnJ7IKpAe+hWTc1+g9iHH3rInPMIzQW72WqSKndKIrRy1PZS5 | |||||
| WM5MJzgWQaDzZSOQhrKA4yLIyzsrBgD4GfFLWh+sQ02sob5xmpDblfQUYVRy5TAx | |||||
| 4WOLSflJqqyarrk7s1D7obqsSoAEdJk521dpE/0ciI5xT41fQKMXH1Qm9tu9uW5d | |||||
| 1Y3oDxQXFJFa34gi5J9UbUBBIJRU0KyFcB1mGVF+fKbAKGPFR2lMCmkeqAYjVohM | |||||
| PG+tluArQrQYCwkZroR460TqvSadmPUekEjYsIzwlaOkJhGf7r40G5Djgyb2/LoC | |||||
| JY28zH7P9MXxIc7WAWuMJniUOqvslXcGAOkfZ1KVI61AIAvkEoRUpKwNSofs2PDQ | |||||
| 1K5Q9DN0NK5UNAAr+Wn91mw/MBXqxdheq9wjmcsvx8OAhvw7O89QMuTviCTUQjSl | |||||
| Wzel6gpoZhpOgVb2RTxV7yVrp2fgYKkeUr7hiGhSxw78guF2jLgfBgb1ef+XKIMk | |||||
| 5anUqKcsHHiouBQbcUCDyKBcVeIUKjAuh9ADpqn1v1oVshugnjpx32Oq1AW6Mn9e | |||||
| SmxBoR7YIvsy79P2IonjixEAjSp1chkGpNQTtBhnaXRlYSA8Z2l0ZWFAZmFrZS5s | |||||
| b2NhbD6JAdQEEwEKAD4WIQQ4G/p4KVUOUEVu5g5R68KXFICqDwUCXfX3RwIbAwUJ | |||||
| A8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRBR68KXFICqD/8/C/4wYdr1 | |||||
| Q6fnXiAdBZPQDOHUjCSjH1d6tbVYaCz0ku8rK1uU0oTToKRSTXH9K59erHl//MEX | |||||
| Rte6loI22y3GFA8wAWzFKGwb6wDUr0SkH21espsp+plKUI/gHV8JyfWs0pLmy/C0 | |||||
| tOr9XhdnQepFjpDAquWszSO0B70G32Tl82Tzbrho6+ePvU+2X0fYj1F8q/2bmegB | |||||
| lL1CcdVuivBqYglj6tzlurPXFq1QenJdssZNn0fizGiGfTY/7kgrvKHc4KN03i9d | |||||
| PUrPMQw7J59KSFNdkE3KYdedmEeWBVmrbfC8QBEO1zcTJN9wwV8fVv4qOhKN8yIO | |||||
| QLuhBZTeChtP3i2FCPHQqbeD2f0SG+yBbWu/OyfSC2YHcGyjbNV8D9upNg5eIJ34 | |||||
| Sm5i0tGUYEdq9QQROacXn2/MhyJuJYbFrTcsHLsSiwygUXvHOqi0G0gEjWl67yMd | |||||
| 9YIq1sZlNs3WY7ASrV+QcB7B9WKJAyh5YWz/G4MlThU91YUfltAb3QmNFeadBVgE | |||||
| XfX3RwEMALH7pae22J/BmhFFBCjMld166hTknXjOR459csv5ppky91Yl82VFFg+J | |||||
| G6hbkGzvCQ5XDJu8xgWeXowe2sXkyDwhTRaB6MEnA4KW5PUazL7KlDGsR5oPvBlE | |||||
| dSQDGzTV/RPcszSNdcN9MRNbfAf0ZFV6D9R3CIlNZAm6HwML7lZ0JmCiLORz3TbF | |||||
| 4kg1KDZIQAhY7Y7AuMdoXfnpUqFLba2ZxZBvdcrMcuYz8GkmFsYdi1/JuXEK3//B | |||||
| Mo7Pg78zsq7UolUcT2p3qKb7hB3CEtwa3xffwzgAcFSKYrCE/5/IYjHhS97uKWor | |||||
| 8dh59wUCuPCmAiuIz3aD84rxZIHgBGPy03TEWCrCBCVxAdH/2Ezpn3DpuZyCuanJ | |||||
| 0WGSzrPBw+twA8bk9BATvFVQ/7Bs9deSsMAOI1uj9lTy1R9LU/KHEr8BEz61+Bgk | |||||
| +m4ev6OQAVDY6QpRtf+zfB2xO95Wu4l1pIFuz7OJaZLCm2ApeAKsCCUDdTSJn5e7 | |||||
| 0i1E4SIVgwARAQABAAv/WHaZqbiqBw21RCwnmxfEzWbQfj37PxZYXqxfqJ6XfcHl | |||||
| Sb5nMcia5HHje1S3fk15FNWTgLzdN+G1YLPdTUsfczOiGzPKumZnyjqx5lnBtnr+ | |||||
| GYpltF9pwK1UA+g/V42c0oh50f8Vr2rEP7jS9ykzzYBz6ciYR5ZdyK/nxh3iArqM | |||||
| cK9q3MnyA81rYTR6njBfE0cQHEoSDZsESrj7xwu0ofqyRc4AoCHqYh0iu0ChRSte | |||||
| IOgk8djT6Uzfkjf2ZcyNiD2/iFzAXaI8CpoiiRJDn/qIhtSFqjb284wwbmTUE+Nw | |||||
| LjeMbpKQiWqnsw2GKhlXVvTLjrCb8TIKjbLtFH2HlEaIjL332GcVqkVy2TMtjZRi | |||||
| lhy/uSY2kzkBkoGJXp5isJFk3ZcOHHsG4VQ+08vq++GoqQE8U1t8zMfAbBFoFlpP | |||||
| nkRjZs0MwY9u6C0IiXRDrYrMIW12LjsRBiebGHUhzv10/4T0XZ8FKTewxcszcuMf | |||||
| lpbIotF1ItGqCXqgufnhBgDBhi8BErxO59ksWLAHdozDyiXwQ5ua6WesGrIRvw23 | |||||
| Z8a5wxTByXmd1fMgJ509hpXbxUC94TGObJoUo23YE3TpqlTLs5NqeFzzU+OjWSb0 | |||||
| Wo1hpFlzyatuynpz2aXbKbjw5dgyeIxj9t+NhGo2SW6v+RHtYAWJRFBFOPVOLwTy | |||||
| an733pA3MSUT0oEh+aggDkXEJLBum0P3Onnma7wR7Xj2Nk1SGLCMgVmKbtGlvyrj | |||||
| yc5FZhzuvOfeuLSYoa0KE4sGAOtxED2jDkV2HS67bxrUzMLvAAhOnRmunA7Qk4F7 | |||||
| B2uMYa03O7vnUJAINpmZVu/ubWz1/JRV6M3/1lQTH+2B9kZ5v6kHrczCsoSP2dXD | |||||
| 7CQnxSm6zngdgwkoo+9pgFztGUZM071SjRW+r1IwE/XBZNwFya5PM02/Akb0ejuB | |||||
| 6K2ClnIFf7gflndUZ0mhZn48I88b6mzEG4X4uUZG+4vW8EZEInl+nMA9f3S6YT0U | |||||
| ZG4JC8JMKsmoYLye/BuedHxk6QX6AnMFBjK7cnfBnViJkmXhDLxmcCjwjUUBraRI | |||||
| QbyzHzY2Jq1VyhTJ1HZxE+vj26MzFFzjpe84r1Ggrcowx53RHstBBYBA5OjRy+cN | |||||
| vDzqqWz4cDKU/XlwJhRnG+PcY3c47obpvjjagcwG7xU4df15fDetKajnIloA5r22 | |||||
| hbmVmTAqljyWLnvSNYrvf5QDqqg6tBuHITUiZhYgECpIoeEj9hU8MZSvQOscK0kx | |||||
| Vn8SqUjxDcNazQM8NoxNB10wfJw63hCJAbwEGAEKACYWIQQ4G/p4KVUOUEVu5g5R | |||||
| 68KXFICqDwUCXfX3RwIbDAUJA8JnAAAKCRBR68KXFICqD54+DAC4VZpKrU6Oo04z | |||||
| /gJeC+3fNon6W9Pdxx7KimDOttkpCiss8JydO6aSB97xrWdvMBTui333qGo2exE/ | |||||
| XFA4RF7K4lAKUWbwaR1brLQfGVYOltmMb986/LeE3OsmMt4vbxUnGvHVX+QXDWAr | |||||
| p6q4DZvMgQQhbWp+rMjXtRr10iQnSlM5CYhyawdiiahFqgoo8395l/2JA2YGhUgU | |||||
| nARUPZ9SqaUmRm+KGsSyoYnvN9apiDk5KVQoyfrmweNN7DCIIcoh/B9Ax8nmouKz | |||||
| yBB2fjCM/bJNtN/AsgYbZIScuYK/xqTkwNtbe5WdCyD/QJOHTsPJzx59hgSVo6gf | |||||
| Fe8VBnxHtrY8gPSUU3gkhYLvLzyVX+YLNzRcffobd8gJbfumwFJUkz91oGvYz7xg | |||||
| XN2qmsgBNCbTIzWZMpRDMAbY+n2QFImGf+EJZlMdj6gOrIYq8N4+nMW1FwJivsOb | |||||
| muqySyjZnD2AYjEA6OYPXfCVhaB5fTfhQXbIrZbgsEh4ob/eIdM= | |||||
| =oSDR | |||||
| -----END PGP PRIVATE KEY BLOCK----- | |||||
| @@ -11,6 +11,7 @@ import ( | |||||
| "crypto/rsa" | "crypto/rsa" | ||||
| "crypto/sha256" | "crypto/sha256" | ||||
| _ "crypto/sha512" // need for EC keys | _ "crypto/sha512" // need for EC keys | ||||
| "encoding/asn1" | |||||
| "encoding/base64" | "encoding/base64" | ||||
| "encoding/json" | "encoding/json" | ||||
| "fmt" | "fmt" | ||||
| @@ -126,21 +127,23 @@ func jwkEncode(pub crypto.PublicKey) (string, error) { | |||||
| // jwsSign signs the digest using the given key. | // jwsSign signs the digest using the given key. | ||||
| // The hash is unused for ECDSA keys. | // The hash is unused for ECDSA keys. | ||||
| // | |||||
| // Note: non-stdlib crypto.Signer implementations are expected to return | |||||
| // the signature in the format as specified in RFC7518. | |||||
| // See https://tools.ietf.org/html/rfc7518 for more details. | |||||
| func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { | func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { | ||||
| if key, ok := key.(*ecdsa.PrivateKey); ok { | |||||
| // The key.Sign method of ecdsa returns ASN1-encoded signature. | |||||
| // So, we use the package Sign function instead | |||||
| // to get R and S values directly and format the result accordingly. | |||||
| r, s, err := ecdsa.Sign(rand.Reader, key, digest) | |||||
| switch pub := key.Public().(type) { | |||||
| case *rsa.PublicKey: | |||||
| return key.Sign(rand.Reader, digest, hash) | |||||
| case *ecdsa.PublicKey: | |||||
| sigASN1, err := key.Sign(rand.Reader, digest, hash) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| rb, sb := r.Bytes(), s.Bytes() | |||||
| size := key.Params().BitSize / 8 | |||||
| var rs struct{ R, S *big.Int } | |||||
| if _, err := asn1.Unmarshal(sigASN1, &rs); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| rb, sb := rs.R.Bytes(), rs.S.Bytes() | |||||
| size := pub.Params().BitSize / 8 | |||||
| if size%8 > 0 { | if size%8 > 0 { | ||||
| size++ | size++ | ||||
| } | } | ||||
| @@ -149,7 +152,7 @@ func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) | |||||
| copy(sig[size*2-len(sb):], sb) | copy(sig[size*2-len(sb):], sb) | ||||
| return sig, nil | return sig, nil | ||||
| } | } | ||||
| return key.Sign(rand.Reader, digest, hash) | |||||
| return nil, ErrUnsupportedKey | |||||
| } | } | ||||
| // jwsHasher indicates suitable JWS algorithm name and a hash function | // jwsHasher indicates suitable JWS algorithm name and a hash function | ||||
| @@ -17,12 +17,14 @@ import ( | |||||
| // These constants from [PROTOCOL.certkeys] represent the algorithm names | // These constants from [PROTOCOL.certkeys] represent the algorithm names | ||||
| // for certificate types supported by this package. | // for certificate types supported by this package. | ||||
| const ( | const ( | ||||
| CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" | |||||
| CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" | |||||
| CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" | |||||
| CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" | |||||
| CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" | |||||
| CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" | |||||
| CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" | |||||
| CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" | |||||
| CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" | |||||
| CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" | |||||
| CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" | |||||
| CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" | |||||
| CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" | |||||
| CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" | |||||
| ) | ) | ||||
| // Certificate types distinguish between host and user | // Certificate types distinguish between host and user | ||||
| @@ -37,6 +39,7 @@ const ( | |||||
| type Signature struct { | type Signature struct { | ||||
| Format string | Format string | ||||
| Blob []byte | Blob []byte | ||||
| Rest []byte `ssh:"rest"` | |||||
| } | } | ||||
| // CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that | // CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that | ||||
| @@ -429,12 +432,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { | |||||
| } | } | ||||
| var certAlgoNames = map[string]string{ | var certAlgoNames = map[string]string{ | ||||
| KeyAlgoRSA: CertAlgoRSAv01, | |||||
| KeyAlgoDSA: CertAlgoDSAv01, | |||||
| KeyAlgoECDSA256: CertAlgoECDSA256v01, | |||||
| KeyAlgoECDSA384: CertAlgoECDSA384v01, | |||||
| KeyAlgoECDSA521: CertAlgoECDSA521v01, | |||||
| KeyAlgoED25519: CertAlgoED25519v01, | |||||
| KeyAlgoRSA: CertAlgoRSAv01, | |||||
| KeyAlgoDSA: CertAlgoDSAv01, | |||||
| KeyAlgoECDSA256: CertAlgoECDSA256v01, | |||||
| KeyAlgoECDSA384: CertAlgoECDSA384v01, | |||||
| KeyAlgoECDSA521: CertAlgoECDSA521v01, | |||||
| KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, | |||||
| KeyAlgoED25519: CertAlgoED25519v01, | |||||
| KeyAlgoSKED25519: CertAlgoSKED25519v01, | |||||
| } | } | ||||
| // certToPrivAlgo returns the underlying algorithm for a certificate algorithm. | // certToPrivAlgo returns the underlying algorithm for a certificate algorithm. | ||||
| @@ -518,6 +523,12 @@ func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { | |||||
| return | return | ||||
| } | } | ||||
| switch out.Format { | |||||
| case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: | |||||
| out.Rest = in | |||||
| return out, nil, ok | |||||
| } | |||||
| return out, in, ok | return out, in, ok | ||||
| } | } | ||||
| @@ -30,12 +30,14 @@ import ( | |||||
| // These constants represent the algorithm names for key types supported by this | // These constants represent the algorithm names for key types supported by this | ||||
| // package. | // package. | ||||
| const ( | const ( | ||||
| KeyAlgoRSA = "ssh-rsa" | |||||
| KeyAlgoDSA = "ssh-dss" | |||||
| KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" | |||||
| KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" | |||||
| KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" | |||||
| KeyAlgoED25519 = "ssh-ed25519" | |||||
| KeyAlgoRSA = "ssh-rsa" | |||||
| KeyAlgoDSA = "ssh-dss" | |||||
| KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" | |||||
| KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" | |||||
| KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" | |||||
| KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" | |||||
| KeyAlgoED25519 = "ssh-ed25519" | |||||
| KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" | |||||
| ) | ) | ||||
| // These constants represent non-default signature algorithms that are supported | // These constants represent non-default signature algorithms that are supported | ||||
| @@ -58,9 +60,13 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err | |||||
| return parseDSA(in) | return parseDSA(in) | ||||
| case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: | case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: | ||||
| return parseECDSA(in) | return parseECDSA(in) | ||||
| case KeyAlgoSKECDSA256: | |||||
| return parseSKECDSA(in) | |||||
| case KeyAlgoED25519: | case KeyAlgoED25519: | ||||
| return parseED25519(in) | return parseED25519(in) | ||||
| case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: | |||||
| case KeyAlgoSKED25519: | |||||
| return parseSKEd25519(in) | |||||
| case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: | |||||
| cert, err := parseCert(in, certToPrivAlgo(algo)) | cert, err := parseCert(in, certToPrivAlgo(algo)) | ||||
| if err != nil { | if err != nil { | ||||
| return nil, nil, err | return nil, nil, err | ||||
| @@ -685,6 +691,218 @@ func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { | |||||
| return (*ecdsa.PublicKey)(k) | return (*ecdsa.PublicKey)(k) | ||||
| } | } | ||||
| // skFields holds the additional fields present in U2F/FIDO2 signatures. | |||||
| // See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. | |||||
| type skFields struct { | |||||
| // Flags contains U2F/FIDO2 flags such as 'user present' | |||||
| Flags byte | |||||
| // Counter is a monotonic signature counter which can be | |||||
| // used to detect concurrent use of a private key, should | |||||
| // it be extracted from hardware. | |||||
| Counter uint32 | |||||
| } | |||||
| type skECDSAPublicKey struct { | |||||
| // application is a URL-like string, typically "ssh:" for SSH. | |||||
| // see openssh/PROTOCOL.u2f for details. | |||||
| application string | |||||
| ecdsa.PublicKey | |||||
| } | |||||
| func (k *skECDSAPublicKey) Type() string { | |||||
| return KeyAlgoSKECDSA256 | |||||
| } | |||||
| func (k *skECDSAPublicKey) nistID() string { | |||||
| return "nistp256" | |||||
| } | |||||
| func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { | |||||
| var w struct { | |||||
| Curve string | |||||
| KeyBytes []byte | |||||
| Application string | |||||
| Rest []byte `ssh:"rest"` | |||||
| } | |||||
| if err := Unmarshal(in, &w); err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| key := new(skECDSAPublicKey) | |||||
| key.application = w.Application | |||||
| if w.Curve != "nistp256" { | |||||
| return nil, nil, errors.New("ssh: unsupported curve") | |||||
| } | |||||
| key.Curve = elliptic.P256() | |||||
| key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) | |||||
| if key.X == nil || key.Y == nil { | |||||
| return nil, nil, errors.New("ssh: invalid curve point") | |||||
| } | |||||
| return key, w.Rest, nil | |||||
| } | |||||
| func (k *skECDSAPublicKey) Marshal() []byte { | |||||
| // See RFC 5656, section 3.1. | |||||
| keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) | |||||
| w := struct { | |||||
| Name string | |||||
| ID string | |||||
| Key []byte | |||||
| Application string | |||||
| }{ | |||||
| k.Type(), | |||||
| k.nistID(), | |||||
| keyBytes, | |||||
| k.application, | |||||
| } | |||||
| return Marshal(&w) | |||||
| } | |||||
| func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { | |||||
| if sig.Format != k.Type() { | |||||
| return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) | |||||
| } | |||||
| h := ecHash(k.Curve).New() | |||||
| h.Write([]byte(k.application)) | |||||
| appDigest := h.Sum(nil) | |||||
| h.Reset() | |||||
| h.Write(data) | |||||
| dataDigest := h.Sum(nil) | |||||
| var ecSig struct { | |||||
| R *big.Int | |||||
| S *big.Int | |||||
| } | |||||
| if err := Unmarshal(sig.Blob, &ecSig); err != nil { | |||||
| return err | |||||
| } | |||||
| var skf skFields | |||||
| if err := Unmarshal(sig.Rest, &skf); err != nil { | |||||
| return err | |||||
| } | |||||
| blob := struct { | |||||
| ApplicationDigest []byte `ssh:"rest"` | |||||
| Flags byte | |||||
| Counter uint32 | |||||
| MessageDigest []byte `ssh:"rest"` | |||||
| }{ | |||||
| appDigest, | |||||
| skf.Flags, | |||||
| skf.Counter, | |||||
| dataDigest, | |||||
| } | |||||
| original := Marshal(blob) | |||||
| h.Reset() | |||||
| h.Write(original) | |||||
| digest := h.Sum(nil) | |||||
| if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { | |||||
| return nil | |||||
| } | |||||
| return errors.New("ssh: signature did not verify") | |||||
| } | |||||
// skEd25519PublicKey is an Ed25519 public key bound to a U2F/FIDO
// security key, pairing the raw key bytes with the token's application
// string.
type skEd25519PublicKey struct {
	// application is a URL-like string, typically "ssh:" for SSH.
	// see openssh/PROTOCOL.u2f for details.
	application string
	ed25519.PublicKey
}
// Type returns the SSH algorithm name for this key (KeyAlgoSKED25519).
func (k *skEd25519PublicKey) Type() string {
	return KeyAlgoSKED25519
}
| func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { | |||||
| var w struct { | |||||
| KeyBytes []byte | |||||
| Application string | |||||
| Rest []byte `ssh:"rest"` | |||||
| } | |||||
| if err := Unmarshal(in, &w); err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| key := new(skEd25519PublicKey) | |||||
| key.application = w.Application | |||||
| key.PublicKey = ed25519.PublicKey(w.KeyBytes) | |||||
| return key, w.Rest, nil | |||||
| } | |||||
| func (k *skEd25519PublicKey) Marshal() []byte { | |||||
| w := struct { | |||||
| Name string | |||||
| KeyBytes []byte | |||||
| Application string | |||||
| }{ | |||||
| KeyAlgoSKED25519, | |||||
| []byte(k.PublicKey), | |||||
| k.application, | |||||
| } | |||||
| return Marshal(&w) | |||||
| } | |||||
| func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { | |||||
| if sig.Format != k.Type() { | |||||
| return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) | |||||
| } | |||||
| h := sha256.New() | |||||
| h.Write([]byte(k.application)) | |||||
| appDigest := h.Sum(nil) | |||||
| h.Reset() | |||||
| h.Write(data) | |||||
| dataDigest := h.Sum(nil) | |||||
| var edSig struct { | |||||
| Signature []byte `ssh:"rest"` | |||||
| } | |||||
| if err := Unmarshal(sig.Blob, &edSig); err != nil { | |||||
| return err | |||||
| } | |||||
| var skf skFields | |||||
| if err := Unmarshal(sig.Rest, &skf); err != nil { | |||||
| return err | |||||
| } | |||||
| blob := struct { | |||||
| ApplicationDigest []byte `ssh:"rest"` | |||||
| Flags byte | |||||
| Counter uint32 | |||||
| MessageDigest []byte `ssh:"rest"` | |||||
| }{ | |||||
| appDigest, | |||||
| skf.Flags, | |||||
| skf.Counter, | |||||
| dataDigest, | |||||
| } | |||||
| original := Marshal(blob) | |||||
| edKey := (ed25519.PublicKey)(k.PublicKey) | |||||
| if ok := ed25519.Verify(edKey, original, edSig.Signature); !ok { | |||||
| return errors.New("ssh: signature did not verify") | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, | // NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, | ||||
| // *ecdsa.PrivateKey or any other crypto.Signer and returns a | // *ecdsa.PrivateKey or any other crypto.Signer and returns a | ||||
| // corresponding Signer instance. ECDSA keys must use P-256, P-384 or | // corresponding Signer instance. ECDSA keys must use P-256, P-384 or | ||||
| @@ -837,7 +1055,8 @@ func NewPublicKey(key interface{}) (PublicKey, error) { | |||||
| } | } | ||||
| // ParsePrivateKey returns a Signer from a PEM encoded private key. It supports | // ParsePrivateKey returns a Signer from a PEM encoded private key. It supports | ||||
| // the same keys as ParseRawPrivateKey. | |||||
| // the same keys as ParseRawPrivateKey. If the private key is encrypted, it | |||||
| // will return a PassphraseMissingError. | |||||
| func ParsePrivateKey(pemBytes []byte) (Signer, error) { | func ParsePrivateKey(pemBytes []byte) (Signer, error) { | ||||
| key, err := ParseRawPrivateKey(pemBytes) | key, err := ParseRawPrivateKey(pemBytes) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -850,8 +1069,8 @@ func ParsePrivateKey(pemBytes []byte) (Signer, error) { | |||||
| // ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private | // ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private | ||||
| // key and passphrase. It supports the same keys as | // key and passphrase. It supports the same keys as | ||||
| // ParseRawPrivateKeyWithPassphrase. | // ParseRawPrivateKeyWithPassphrase. | ||||
| func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { | |||||
| key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) | |||||
| func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { | |||||
| key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -867,8 +1086,21 @@ func encryptedBlock(block *pem.Block) bool { | |||||
| return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") | return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") | ||||
| } | } | ||||
// A PassphraseMissingError indicates that parsing this private key requires a
// passphrase. Use ParsePrivateKeyWithPassphrase.
type PassphraseMissingError struct {
	// PublicKey will be set if the private key format includes an unencrypted
	// public key along with the encrypted private key.
	PublicKey PublicKey
}
// Error implements the error interface.
func (*PassphraseMissingError) Error() string {
	return "ssh: this private key is passphrase protected"
}
| // ParseRawPrivateKey returns a private key from a PEM encoded private key. It | // ParseRawPrivateKey returns a private key from a PEM encoded private key. It | ||||
| // supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. | |||||
| // supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the | |||||
| // private key is encrypted, it will return a PassphraseMissingError. | |||||
| func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { | func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { | ||||
| block, _ := pem.Decode(pemBytes) | block, _ := pem.Decode(pemBytes) | ||||
| if block == nil { | if block == nil { | ||||
| @@ -876,7 +1108,7 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { | |||||
| } | } | ||||
| if encryptedBlock(block) { | if encryptedBlock(block) { | ||||
| return nil, errors.New("ssh: cannot decode encrypted private keys") | |||||
| return nil, &PassphraseMissingError{} | |||||
| } | } | ||||
| switch block.Type { | switch block.Type { | ||||
| @@ -899,24 +1131,22 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { | |||||
| // ParseRawPrivateKeyWithPassphrase returns a private key decrypted with | // ParseRawPrivateKeyWithPassphrase returns a private key decrypted with | ||||
| // passphrase from a PEM encoded private key. If wrong passphrase, return | // passphrase from a PEM encoded private key. If wrong passphrase, return | ||||
| // x509.IncorrectPasswordError. | // x509.IncorrectPasswordError. | ||||
| func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { | |||||
| func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { | |||||
| block, _ := pem.Decode(pemBytes) | block, _ := pem.Decode(pemBytes) | ||||
| if block == nil { | if block == nil { | ||||
| return nil, errors.New("ssh: no key found") | return nil, errors.New("ssh: no key found") | ||||
| } | } | ||||
| buf := block.Bytes | |||||
| if encryptedBlock(block) { | |||||
| if x509.IsEncryptedPEMBlock(block) { | |||||
| var err error | |||||
| buf, err = x509.DecryptPEMBlock(block, passPhrase) | |||||
| if err != nil { | |||||
| if err == x509.IncorrectPasswordError { | |||||
| return nil, err | |||||
| } | |||||
| return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) | |||||
| } | |||||
| if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { | |||||
| return nil, errors.New("ssh: not an encrypted key") | |||||
| } | |||||
| buf, err := x509.DecryptPEMBlock(block, passphrase) | |||||
| if err != nil { | |||||
| if err == x509.IncorrectPasswordError { | |||||
| return nil, err | |||||
| } | } | ||||
| return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) | |||||
| } | } | ||||
| switch block.Type { | switch block.Type { | ||||
| @@ -926,8 +1156,6 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, | |||||
| return x509.ParseECPrivateKey(buf) | return x509.ParseECPrivateKey(buf) | ||||
| case "DSA PRIVATE KEY": | case "DSA PRIVATE KEY": | ||||
| return ParseDSAPrivateKey(buf) | return ParseDSAPrivateKey(buf) | ||||
| case "OPENSSH PRIVATE KEY": | |||||
| return parseOpenSSHPrivateKey(buf) | |||||
| default: | default: | ||||
| return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) | return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) | ||||
| } | } | ||||
| @@ -284,8 +284,8 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) | |||||
| func isAcceptableAlgo(algo string) bool { | func isAcceptableAlgo(algo string) bool { | ||||
| switch algo { | switch algo { | ||||
| case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, | |||||
| CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: | |||||
| case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, | |||||
| CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: | |||||
| return true | return true | ||||
| } | } | ||||
| return false | return false | ||||
| @@ -65,7 +65,7 @@ func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Conte | |||||
| // | // | ||||
| // The archive consists of a series of files. Each file consists of a | // The archive consists of a series of files. Each file consists of a | ||||
| // name, a decimal file size and the file contents, separated by | // name, a decimal file size and the file contents, separated by | ||||
| // newlinews. No newline follows after the file contents. | |||||
| // newlines. No newline follows after the file contents. | |||||
| func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) { | func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) { | ||||
| overlay := make(map[string][]byte) | overlay := make(map[string][]byte) | ||||
| r := bufio.NewReader(archive) | r := bufio.NewReader(archive) | ||||
| @@ -100,7 +100,7 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, | |||||
| // Write writes encoded type information for the specified package to out. | // Write writes encoded type information for the specified package to out. | ||||
| // The FileSet provides file position information for named objects. | // The FileSet provides file position information for named objects. | ||||
| func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { | func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { | ||||
| b, err := gcimporter.BExportData(fset, pkg) | |||||
| b, err := gcimporter.IExportData(fset, pkg) | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| @@ -332,7 +332,7 @@ func (p *importer) pos() token.Pos { | |||||
| p.prevFile = file | p.prevFile = file | ||||
| p.prevLine = line | p.prevLine = line | ||||
| return p.fake.pos(file, line) | |||||
| return p.fake.pos(file, line, 0) | |||||
| } | } | ||||
| // Synthesize a token.Pos | // Synthesize a token.Pos | ||||
| @@ -341,7 +341,9 @@ type fakeFileSet struct { | |||||
| files map[string]*token.File | files map[string]*token.File | ||||
| } | } | ||||
| func (s *fakeFileSet) pos(file string, line int) token.Pos { | |||||
| func (s *fakeFileSet) pos(file string, line, column int) token.Pos { | |||||
| // TODO(mdempsky): Make use of column. | |||||
| // Since we don't know the set of needed file positions, we | // Since we don't know the set of needed file positions, we | ||||
| // reserve maxlines positions per file. | // reserve maxlines positions per file. | ||||
| const maxlines = 64 * 1024 | const maxlines = 64 * 1024 | ||||
| @@ -6,8 +6,6 @@ | |||||
| // This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; | // This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; | ||||
| // see that file for specification of the format. | // see that file for specification of the format. | ||||
| // +build go1.11 | |||||
| package gcimporter | package gcimporter | ||||
| import ( | import ( | ||||
| @@ -28,7 +26,10 @@ import ( | |||||
| const iexportVersion = 0 | const iexportVersion = 0 | ||||
| // IExportData returns the binary export data for pkg. | // IExportData returns the binary export data for pkg. | ||||
| // | |||||
| // If no file set is provided, position info will be missing. | // If no file set is provided, position info will be missing. | ||||
| // The package path of the top-level package will not be recorded, | |||||
| // so that calls to IImportData can override with a provided package path. | |||||
| func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { | func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { | ||||
| defer func() { | defer func() { | ||||
| if e := recover(); e != nil { | if e := recover(); e != nil { | ||||
| @@ -48,6 +49,7 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) | |||||
| stringIndex: map[string]uint64{}, | stringIndex: map[string]uint64{}, | ||||
| declIndex: map[types.Object]uint64{}, | declIndex: map[types.Object]uint64{}, | ||||
| typIndex: map[types.Type]uint64{}, | typIndex: map[types.Type]uint64{}, | ||||
| localpkg: pkg, | |||||
| } | } | ||||
| for i, pt := range predeclared() { | for i, pt := range predeclared() { | ||||
| @@ -73,7 +75,7 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) | |||||
| // Append indices to data0 section. | // Append indices to data0 section. | ||||
| dataLen := uint64(p.data0.Len()) | dataLen := uint64(p.data0.Len()) | ||||
| w := p.newWriter() | w := p.newWriter() | ||||
| w.writeIndex(p.declIndex, pkg) | |||||
| w.writeIndex(p.declIndex) | |||||
| w.flush() | w.flush() | ||||
| // Assemble header. | // Assemble header. | ||||
| @@ -95,14 +97,14 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) | |||||
| // we're writing out the main index, which is also read by | // we're writing out the main index, which is also read by | ||||
| // non-compiler tools and includes a complete package description | // non-compiler tools and includes a complete package description | ||||
| // (i.e., name and height). | // (i.e., name and height). | ||||
| func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) { | |||||
| func (w *exportWriter) writeIndex(index map[types.Object]uint64) { | |||||
| // Build a map from packages to objects from that package. | // Build a map from packages to objects from that package. | ||||
| pkgObjs := map[*types.Package][]types.Object{} | pkgObjs := map[*types.Package][]types.Object{} | ||||
| // For the main index, make sure to include every package that | // For the main index, make sure to include every package that | ||||
| // we reference, even if we're not exporting (or reexporting) | // we reference, even if we're not exporting (or reexporting) | ||||
| // any symbols from it. | // any symbols from it. | ||||
| pkgObjs[localpkg] = nil | |||||
| pkgObjs[w.p.localpkg] = nil | |||||
| for pkg := range w.p.allPkgs { | for pkg := range w.p.allPkgs { | ||||
| pkgObjs[pkg] = nil | pkgObjs[pkg] = nil | ||||
| } | } | ||||
| @@ -121,12 +123,12 @@ func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types | |||||
| } | } | ||||
| sort.Slice(pkgs, func(i, j int) bool { | sort.Slice(pkgs, func(i, j int) bool { | ||||
| return pkgs[i].Path() < pkgs[j].Path() | |||||
| return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) | |||||
| }) | }) | ||||
| w.uint64(uint64(len(pkgs))) | w.uint64(uint64(len(pkgs))) | ||||
| for _, pkg := range pkgs { | for _, pkg := range pkgs { | ||||
| w.string(pkg.Path()) | |||||
| w.string(w.exportPath(pkg)) | |||||
| w.string(pkg.Name()) | w.string(pkg.Name()) | ||||
| w.uint64(uint64(0)) // package height is not needed for go/types | w.uint64(uint64(0)) // package height is not needed for go/types | ||||
| @@ -143,6 +145,8 @@ type iexporter struct { | |||||
| fset *token.FileSet | fset *token.FileSet | ||||
| out *bytes.Buffer | out *bytes.Buffer | ||||
| localpkg *types.Package | |||||
| // allPkgs tracks all packages that have been referenced by | // allPkgs tracks all packages that have been referenced by | ||||
| // the export data, so we can ensure to include them in the | // the export data, so we can ensure to include them in the | ||||
| // main index. | // main index. | ||||
| @@ -195,6 +199,13 @@ type exportWriter struct { | |||||
| prevLine int64 | prevLine int64 | ||||
| } | } | ||||
// exportPath returns the path string to record for pkg in the export
// data: the empty string for the package currently being exported (so
// importers can substitute a caller-provided path), and pkg.Path()
// for every other package.
func (w *exportWriter) exportPath(pkg *types.Package) string {
	if pkg == w.p.localpkg {
		return ""
	}
	return pkg.Path()
}
| func (p *iexporter) doDecl(obj types.Object) { | func (p *iexporter) doDecl(obj types.Object) { | ||||
| w := p.newWriter() | w := p.newWriter() | ||||
| w.setPkg(obj.Pkg(), false) | w.setPkg(obj.Pkg(), false) | ||||
| @@ -267,6 +278,11 @@ func (w *exportWriter) tag(tag byte) { | |||||
| } | } | ||||
| func (w *exportWriter) pos(pos token.Pos) { | func (w *exportWriter) pos(pos token.Pos) { | ||||
| if w.p.fset == nil { | |||||
| w.int64(0) | |||||
| return | |||||
| } | |||||
| p := w.p.fset.Position(pos) | p := w.p.fset.Position(pos) | ||||
| file := p.Filename | file := p.Filename | ||||
| line := int64(p.Line) | line := int64(p.Line) | ||||
| @@ -299,7 +315,7 @@ func (w *exportWriter) pkg(pkg *types.Package) { | |||||
| // Ensure any referenced packages are declared in the main index. | // Ensure any referenced packages are declared in the main index. | ||||
| w.p.allPkgs[pkg] = true | w.p.allPkgs[pkg] = true | ||||
| w.string(pkg.Path()) | |||||
| w.string(w.exportPath(pkg)) | |||||
| } | } | ||||
| func (w *exportWriter) qualifiedIdent(obj types.Object) { | func (w *exportWriter) qualifiedIdent(obj types.Object) { | ||||
| @@ -394,7 +410,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { | |||||
| w.pos(f.Pos()) | w.pos(f.Pos()) | ||||
| w.string(f.Name()) | w.string(f.Name()) | ||||
| w.typ(f.Type(), pkg) | w.typ(f.Type(), pkg) | ||||
| w.bool(f.Embedded()) | |||||
| w.bool(f.Anonymous()) | |||||
| w.string(t.Tag(i)) // note (or tag) | w.string(t.Tag(i)) // note (or tag) | ||||
| } | } | ||||
| @@ -63,8 +63,8 @@ const ( | |||||
| // If the export data version is not recognized or the format is otherwise | // If the export data version is not recognized or the format is otherwise | ||||
| // compromised, an error is returned. | // compromised, an error is returned. | ||||
| func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { | func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { | ||||
| const currentVersion = 0 | |||||
| version := -1 | |||||
| const currentVersion = 1 | |||||
| version := int64(-1) | |||||
| defer func() { | defer func() { | ||||
| if e := recover(); e != nil { | if e := recover(); e != nil { | ||||
| if version > currentVersion { | if version > currentVersion { | ||||
| @@ -77,9 +77,9 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] | |||||
| r := &intReader{bytes.NewReader(data), path} | r := &intReader{bytes.NewReader(data), path} | ||||
| version = int(r.uint64()) | |||||
| version = int64(r.uint64()) | |||||
| switch version { | switch version { | ||||
| case currentVersion: | |||||
| case currentVersion, 0: | |||||
| default: | default: | ||||
| errorf("unknown iexport format version %d", version) | errorf("unknown iexport format version %d", version) | ||||
| } | } | ||||
| @@ -93,7 +93,8 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] | |||||
| r.Seek(sLen+dLen, io.SeekCurrent) | r.Seek(sLen+dLen, io.SeekCurrent) | ||||
| p := iimporter{ | p := iimporter{ | ||||
| ipath: path, | |||||
| ipath: path, | |||||
| version: int(version), | |||||
| stringData: stringData, | stringData: stringData, | ||||
| stringCache: make(map[uint64]string), | stringCache: make(map[uint64]string), | ||||
| @@ -142,20 +143,18 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] | |||||
| p.pkgIndex[pkg] = nameIndex | p.pkgIndex[pkg] = nameIndex | ||||
| pkgList[i] = pkg | pkgList[i] = pkg | ||||
| } | } | ||||
| var localpkg *types.Package | |||||
| for _, pkg := range pkgList { | |||||
| if pkg.Path() == path { | |||||
| localpkg = pkg | |||||
| } | |||||
| if len(pkgList) == 0 { | |||||
| errorf("no packages found for %s", path) | |||||
| panic("unreachable") | |||||
| } | } | ||||
| names := make([]string, 0, len(p.pkgIndex[localpkg])) | |||||
| for name := range p.pkgIndex[localpkg] { | |||||
| p.ipkg = pkgList[0] | |||||
| names := make([]string, 0, len(p.pkgIndex[p.ipkg])) | |||||
| for name := range p.pkgIndex[p.ipkg] { | |||||
| names = append(names, name) | names = append(names, name) | ||||
| } | } | ||||
| sort.Strings(names) | sort.Strings(names) | ||||
| for _, name := range names { | for _, name := range names { | ||||
| p.doDecl(localpkg, name) | |||||
| p.doDecl(p.ipkg, name) | |||||
| } | } | ||||
| for _, typ := range p.interfaceList { | for _, typ := range p.interfaceList { | ||||
| @@ -165,17 +164,19 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] | |||||
| // record all referenced packages as imports | // record all referenced packages as imports | ||||
| list := append(([]*types.Package)(nil), pkgList[1:]...) | list := append(([]*types.Package)(nil), pkgList[1:]...) | ||||
| sort.Sort(byPath(list)) | sort.Sort(byPath(list)) | ||||
| localpkg.SetImports(list) | |||||
| p.ipkg.SetImports(list) | |||||
| // package was imported completely and without errors | // package was imported completely and without errors | ||||
| localpkg.MarkComplete() | |||||
| p.ipkg.MarkComplete() | |||||
| consumed, _ := r.Seek(0, io.SeekCurrent) | consumed, _ := r.Seek(0, io.SeekCurrent) | ||||
| return int(consumed), localpkg, nil | |||||
| return int(consumed), p.ipkg, nil | |||||
| } | } | ||||
| type iimporter struct { | type iimporter struct { | ||||
| ipath string | |||||
| ipath string | |||||
| ipkg *types.Package | |||||
| version int | |||||
| stringData []byte | stringData []byte | ||||
| stringCache map[uint64]string | stringCache map[uint64]string | ||||
| @@ -226,6 +227,9 @@ func (p *iimporter) pkgAt(off uint64) *types.Package { | |||||
| return pkg | return pkg | ||||
| } | } | ||||
| path := p.stringAt(off) | path := p.stringAt(off) | ||||
| if path == p.ipath { | |||||
| return p.ipkg | |||||
| } | |||||
| errorf("missing package %q in %q", path, p.ipath) | errorf("missing package %q in %q", path, p.ipath) | ||||
| return nil | return nil | ||||
| } | } | ||||
| @@ -255,6 +259,7 @@ type importReader struct { | |||||
| currPkg *types.Package | currPkg *types.Package | ||||
| prevFile string | prevFile string | ||||
| prevLine int64 | prevLine int64 | ||||
| prevColumn int64 | |||||
| } | } | ||||
| func (r *importReader) obj(name string) { | func (r *importReader) obj(name string) { | ||||
| @@ -448,6 +453,19 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { | |||||
| } | } | ||||
// pos reads the next position from the export data, dispatching on the
// format version (v1 also carries a column delta; v0 has file/line
// only), and converts the accumulated file/line/column state into a
// token.Pos via the fake file set. An all-zero state means "no
// position" and yields token.NoPos.
func (r *importReader) pos() token.Pos {
	if r.p.version >= 1 {
		r.posv1()
	} else {
		r.posv0()
	}

	if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
		return token.NoPos
	}

	return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
}
| func (r *importReader) posv0() { | |||||
| delta := r.int64() | delta := r.int64() | ||||
| if delta != deltaNewFile { | if delta != deltaNewFile { | ||||
| r.prevLine += delta | r.prevLine += delta | ||||
| @@ -457,12 +475,18 @@ func (r *importReader) pos() token.Pos { | |||||
| r.prevFile = r.string() | r.prevFile = r.string() | ||||
| r.prevLine = l | r.prevLine = l | ||||
| } | } | ||||
| } | |||||
| if r.prevFile == "" && r.prevLine == 0 { | |||||
| return token.NoPos | |||||
| func (r *importReader) posv1() { | |||||
| delta := r.int64() | |||||
| r.prevColumn += delta >> 1 | |||||
| if delta&1 != 0 { | |||||
| delta = r.int64() | |||||
| r.prevLine += delta >> 1 | |||||
| if delta&1 != 0 { | |||||
| r.prevFile = r.string() | |||||
| } | |||||
| } | } | ||||
| return r.p.fake.pos(r.prevFile, int(r.prevLine)) | |||||
| } | } | ||||
| func (r *importReader) typ() types.Type { | func (r *importReader) typ() types.Type { | ||||
| @@ -81,13 +81,13 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u | |||||
| args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} | args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} | ||||
| args = append(args, buildFlags...) | args = append(args, buildFlags...) | ||||
| args = append(args, "--", "unsafe") | args = append(args, "--", "unsafe") | ||||
| stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...) | |||||
| stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...) | |||||
| var goarch, compiler string | var goarch, compiler string | ||||
| if err != nil { | if err != nil { | ||||
| if strings.Contains(err.Error(), "cannot find main module") { | if strings.Contains(err.Error(), "cannot find main module") { | ||||
| // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. | // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. | ||||
| // TODO(matloob): Is this a problem in practice? | // TODO(matloob): Is this a problem in practice? | ||||
| envout, enverr := InvokeGo(ctx, env, dir, usesExportData, "env", "GOARCH") | |||||
| envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH") | |||||
| if enverr != nil { | if enverr != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -99,7 +99,8 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u | |||||
| } else { | } else { | ||||
| fields := strings.Fields(stdout.String()) | fields := strings.Fields(stdout.String()) | ||||
| if len(fields) < 2 { | if len(fields) < 2 { | ||||
| return nil, fmt.Errorf("could not determine GOARCH and Go compiler") | |||||
| return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>", | |||||
| cmdDebugStr(env, args...), dir, stdout.String(), stderr.String()) | |||||
| } | } | ||||
| goarch = fields[0] | goarch = fields[0] | ||||
| compiler = fields[1] | compiler = fields[1] | ||||
| @@ -107,8 +108,8 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u | |||||
| return types.SizesFor(compiler, goarch), nil | return types.SizesFor(compiler, goarch), nil | ||||
| } | } | ||||
| // InvokeGo returns the stdout of a go command invocation. | |||||
| func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) { | |||||
| // invokeGo returns the stdout and stderr of a go command invocation. | |||||
| func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) { | |||||
| if debug { | if debug { | ||||
| defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) | defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) | ||||
| } | } | ||||
| @@ -131,7 +132,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool | |||||
| // Catastrophic error: | // Catastrophic error: | ||||
| // - executable not found | // - executable not found | ||||
| // - context cancellation | // - context cancellation | ||||
| return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) | |||||
| return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) | |||||
| } | } | ||||
| // Export mode entails a build. | // Export mode entails a build. | ||||
| @@ -139,7 +140,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool | |||||
| // (despite the -e flag) and the Export field is blank. | // (despite the -e flag) and the Export field is blank. | ||||
| // Do not fail in that case. | // Do not fail in that case. | ||||
| if !usesExportData { | if !usesExportData { | ||||
| return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) | |||||
| return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) | |||||
| } | } | ||||
| } | } | ||||
| @@ -158,7 +159,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool | |||||
| fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) | fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) | ||||
| } | } | ||||
| return stdout, nil | |||||
| return stdout, stderr, nil | |||||
| } | } | ||||
| func cmdDebugStr(envlist []string, args ...string) string { | func cmdDebugStr(envlist []string, args ...string) string { | ||||
| @@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information. | |||||
| See the documentation for type Config for details. | See the documentation for type Config for details. | ||||
| As noted earlier, the Config.Mode controls the amount of detail | As noted earlier, the Config.Mode controls the amount of detail | ||||
| reported about the loaded packages, with each mode returning all the data of the | |||||
| previous mode with some extra added. See the documentation for type LoadMode | |||||
| reported about the loaded packages. See the documentation for type LoadMode | |||||
| for details. | for details. | ||||
| Most tools should pass their command-line arguments (after any flags) | Most tools should pass their command-line arguments (after any flags) | ||||
| @@ -12,6 +12,7 @@ import ( | |||||
| "bytes" | "bytes" | ||||
| "encoding/json" | "encoding/json" | ||||
| "fmt" | "fmt" | ||||
| "os" | |||||
| "os/exec" | "os/exec" | ||||
| "strings" | "strings" | ||||
| ) | ) | ||||
| @@ -76,15 +77,21 @@ func findExternalDriver(cfg *Config) driver { | |||||
| } | } | ||||
| buf := new(bytes.Buffer) | buf := new(bytes.Buffer) | ||||
| stderr := new(bytes.Buffer) | |||||
| cmd := exec.CommandContext(cfg.Context, tool, words...) | cmd := exec.CommandContext(cfg.Context, tool, words...) | ||||
| cmd.Dir = cfg.Dir | cmd.Dir = cfg.Dir | ||||
| cmd.Env = cfg.Env | cmd.Env = cfg.Env | ||||
| cmd.Stdin = bytes.NewReader(req) | cmd.Stdin = bytes.NewReader(req) | ||||
| cmd.Stdout = buf | cmd.Stdout = buf | ||||
| cmd.Stderr = new(bytes.Buffer) | |||||
| cmd.Stderr = stderr | |||||
| if err := cmd.Run(); err != nil { | if err := cmd.Run(); err != nil { | ||||
| return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) | return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) | ||||
| } | } | ||||
| if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { | |||||
| fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) | |||||
| } | |||||
| var response driverResponse | var response driverResponse | ||||
| if err := json.Unmarshal(buf.Bytes(), &response); err != nil { | if err := json.Unmarshal(buf.Bytes(), &response); err != nil { | ||||
| return nil, err | return nil, err | ||||
| @@ -109,6 +109,7 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { | |||||
| sizeswg.Done() | sizeswg.Done() | ||||
| }() | }() | ||||
| } | } | ||||
| defer sizeswg.Wait() | |||||
| // start fetching rootDirs | // start fetching rootDirs | ||||
| var info goInfo | var info goInfo | ||||
| @@ -127,6 +128,10 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { | |||||
| return &info | return &info | ||||
| } | } | ||||
| // Ensure that we don't leak goroutines: Load is synchronous, so callers will | |||||
| // not expect it to access the fields of cfg after the call returns. | |||||
| defer getGoInfo() | |||||
| // always pass getGoInfo to golistDriver | // always pass getGoInfo to golistDriver | ||||
| golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { | golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { | ||||
| return golistDriver(cfg, getGoInfo, patterns...) | return golistDriver(cfg, getGoInfo, patterns...) | ||||
| @@ -248,12 +253,7 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu | |||||
| if len(pkgs) == 0 { | if len(pkgs) == 0 { | ||||
| return nil | return nil | ||||
| } | } | ||||
| drivercfg := *cfg | |||||
| if getGoInfo().env.modulesOn { | |||||
| drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly") | |||||
| } | |||||
| dr, err := driver(&drivercfg, pkgs...) | |||||
| dr, err := driver(cfg, pkgs...) | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| @@ -264,10 +264,7 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| if err := addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo); err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo) | |||||
| } | } | ||||
| func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { | func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { | ||||
| @@ -291,18 +288,31 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q | |||||
| // Return the original error if the attempt to fall back failed. | // Return the original error if the attempt to fall back failed. | ||||
| return err | return err | ||||
| } | } | ||||
| // If we get nothing back from `go list`, try to make this file into its own ad-hoc package. | |||||
| if len(dirResponse.Packages) == 0 && queryErr == nil { | |||||
| dirResponse.Packages = append(dirResponse.Packages, &Package{ | |||||
| ID: "command-line-arguments", | |||||
| PkgPath: query, | |||||
| GoFiles: []string{query}, | |||||
| CompiledGoFiles: []string{query}, | |||||
| Imports: make(map[string]*Package), | |||||
| }) | |||||
| dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments") | |||||
| } | |||||
| // Special case to handle issue #33482: | // Special case to handle issue #33482: | ||||
| // If this is a file= query for ad-hoc packages where the file only exists on an overlay, | // If this is a file= query for ad-hoc packages where the file only exists on an overlay, | ||||
| // and exists outside of a module, add the file in for the package. | // and exists outside of a module, add the file in for the package. | ||||
| if len(dirResponse.Packages) == 1 && len(dirResponse.Packages) == 1 && | |||||
| dirResponse.Packages[0].ID == "command-line-arguments" && len(dirResponse.Packages[0].GoFiles) == 0 { | |||||
| filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath | |||||
| // TODO(matloob): check if the file is outside of a root dir? | |||||
| for path := range cfg.Overlay { | |||||
| if path == filename { | |||||
| dirResponse.Packages[0].Errors = nil | |||||
| dirResponse.Packages[0].GoFiles = []string{path} | |||||
| dirResponse.Packages[0].CompiledGoFiles = []string{path} | |||||
| if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || | |||||
| filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) { | |||||
| if len(dirResponse.Packages[0].GoFiles) == 0 { | |||||
| filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath | |||||
| // TODO(matloob): check if the file is outside of a root dir? | |||||
| for path := range cfg.Overlay { | |||||
| if path == filename { | |||||
| dirResponse.Packages[0].Errors = nil | |||||
| dirResponse.Packages[0].GoFiles = []string{path} | |||||
| dirResponse.Packages[0].CompiledGoFiles = []string{path} | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -395,6 +405,10 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer | |||||
| } | } | ||||
| files, err := ioutil.ReadDir(modRoot) | files, err := ioutil.ReadDir(modRoot) | ||||
| if err != nil { | |||||
| panic(err) // See above. | |||||
| } | |||||
| for _, f := range files { | for _, f := range files { | ||||
| if strings.HasSuffix(f.Name(), ".go") { | if strings.HasSuffix(f.Name(), ".go") { | ||||
| simpleMatches = append(simpleMatches, rel) | simpleMatches = append(simpleMatches, rel) | ||||
| @@ -462,7 +476,7 @@ func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, quer | |||||
| // We're only trying to look at stuff in the module cache, so | // We're only trying to look at stuff in the module cache, so | ||||
| // disable the network. This should speed things up, and has | // disable the network. This should speed things up, and has | ||||
| // prevented errors in at least one case, #28518. | // prevented errors in at least one case, #28518. | ||||
| tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...)) | |||||
| tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...) | |||||
| var err error | var err error | ||||
| tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") | tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") | ||||
| @@ -510,17 +524,29 @@ func roots(cfg *Config) ([]gopathwalk.Root, string, error) { | |||||
| var roots []gopathwalk.Root | var roots []gopathwalk.Root | ||||
| // Always add GOROOT. | // Always add GOROOT. | ||||
| roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT}) | |||||
| roots = append(roots, gopathwalk.Root{ | |||||
| Path: filepath.Join(goroot, "/src"), | |||||
| Type: gopathwalk.RootGOROOT, | |||||
| }) | |||||
| // If modules are enabled, scan the module dir. | // If modules are enabled, scan the module dir. | ||||
| if modDir != "" { | if modDir != "" { | ||||
| roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule}) | |||||
| roots = append(roots, gopathwalk.Root{ | |||||
| Path: modDir, | |||||
| Type: gopathwalk.RootCurrentModule, | |||||
| }) | |||||
| } | } | ||||
| // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. | // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. | ||||
| for _, p := range gopath { | for _, p := range gopath { | ||||
| if modDir != "" { | if modDir != "" { | ||||
| roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) | |||||
| roots = append(roots, gopathwalk.Root{ | |||||
| Path: filepath.Join(p, "/pkg/mod"), | |||||
| Type: gopathwalk.RootModuleCache, | |||||
| }) | |||||
| } else { | } else { | ||||
| roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH}) | |||||
| roots = append(roots, gopathwalk.Root{ | |||||
| Path: filepath.Join(p, "/src"), | |||||
| Type: gopathwalk.RootGOPATH, | |||||
| }) | |||||
| } | } | ||||
| } | } | ||||
| @@ -637,9 +663,9 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv | |||||
| // go list uses the following identifiers in ImportPath and Imports: | // go list uses the following identifiers in ImportPath and Imports: | ||||
| // | // | ||||
| // "p" -- importable package or main (command) | // "p" -- importable package or main (command) | ||||
| // "q.test" -- q's test executable | |||||
| // "q.test" -- q's test executable | |||||
| // "p [q.test]" -- variant of p as built for q's test executable | // "p [q.test]" -- variant of p as built for q's test executable | ||||
| // "q_test [q.test]" -- q's external test package | |||||
| // "q_test [q.test]" -- q's external test package | |||||
| // | // | ||||
| // The packages p that are built differently for a test q.test | // The packages p that are built differently for a test q.test | ||||
| // are q itself, plus any helpers used by the external test q_test, | // are q itself, plus any helpers used by the external test q_test, | ||||
| @@ -678,11 +704,11 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv | |||||
| // go list -e, when given an absolute path, will find the package contained at | // go list -e, when given an absolute path, will find the package contained at | ||||
| // that directory. But when no package exists there, it will return a fake package | // that directory. But when no package exists there, it will return a fake package | ||||
| // with an error and the ImportPath set to the absolute path provided to go list. | // with an error and the ImportPath set to the absolute path provided to go list. | ||||
| // Try toto convert that absolute path to what its package path would be if it's | |||||
| // Try to convert that absolute path to what its package path would be if it's | |||||
| // contained in a known module or GOPATH entry. This will allow the package to be | // contained in a known module or GOPATH entry. This will allow the package to be | ||||
| // properly "reclaimed" when overlays are processed. | // properly "reclaimed" when overlays are processed. | ||||
| if filepath.IsAbs(p.ImportPath) && p.Error != nil { | if filepath.IsAbs(p.ImportPath) && p.Error != nil { | ||||
| pkgPath, ok := getPkgPath(p.ImportPath, rootsDirs) | |||||
| pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) | |||||
| if ok { | if ok { | ||||
| p.ImportPath = pkgPath | p.ImportPath = pkgPath | ||||
| } | } | ||||
| @@ -779,9 +805,14 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv | |||||
| } | } | ||||
| if p.Error != nil { | if p.Error != nil { | ||||
| msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. | |||||
| // Address golang.org/issue/35964 by appending import stack to error message. | |||||
| if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { | |||||
| msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) | |||||
| } | |||||
| pkg.Errors = append(pkg.Errors, Error{ | pkg.Errors = append(pkg.Errors, Error{ | ||||
| Pos: p.Error.Pos, | Pos: p.Error.Pos, | ||||
| Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363. | |||||
| Msg: msg, | |||||
| }) | }) | ||||
| } | } | ||||
| @@ -792,15 +823,31 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv | |||||
| } | } | ||||
| // getPkgPath finds the package path of a directory if it's relative to a root directory. | // getPkgPath finds the package path of a directory if it's relative to a root directory. | ||||
| func getPkgPath(dir string, goInfo func() *goInfo) (string, bool) { | |||||
| func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { | |||||
| absDir, err := filepath.Abs(dir) | |||||
| if err != nil { | |||||
| cfg.Logf("error getting absolute path of %s: %v", dir, err) | |||||
| return "", false | |||||
| } | |||||
| for rdir, rpath := range goInfo().rootDirs { | for rdir, rpath := range goInfo().rootDirs { | ||||
| absRdir, err := filepath.Abs(rdir) | |||||
| if err != nil { | |||||
| cfg.Logf("error getting absolute path of %s: %v", rdir, err) | |||||
| continue | |||||
| } | |||||
| // Make sure that the directory is in the module, | |||||
| // to avoid creating a path relative to another module. | |||||
| if !strings.HasPrefix(absDir, absRdir) { | |||||
| cfg.Logf("%s does not have prefix %s", absDir, absRdir) | |||||
| continue | |||||
| } | |||||
| // TODO(matloob): This doesn't properly handle symlinks. | // TODO(matloob): This doesn't properly handle symlinks. | ||||
| r, err := filepath.Rel(rdir, dir) | r, err := filepath.Rel(rdir, dir) | ||||
| if err != nil { | if err != nil { | ||||
| continue | continue | ||||
| } | } | ||||
| if rpath != "" { | if rpath != "" { | ||||
| // We choose only ore root even though the directory even it can belong in multiple modules | |||||
| // We choose only one root even though the directory even it can belong in multiple modules | |||||
| // or GOPATH entries. This is okay because we only need to work with absolute dirs when a | // or GOPATH entries. This is okay because we only need to work with absolute dirs when a | ||||
| // file is missing from disk, for instance when gopls calls go/packages in an overlay. | // file is missing from disk, for instance when gopls calls go/packages in an overlay. | ||||
| // Once the file is saved, gopls, or the next invocation of the tool will get the correct | // Once the file is saved, gopls, or the next invocation of the tool will get the correct | ||||
| @@ -808,6 +855,7 @@ func getPkgPath(dir string, goInfo func() *goInfo) (string, bool) { | |||||
| // TODO(matloob): Implement module tiebreaking? | // TODO(matloob): Implement module tiebreaking? | ||||
| return path.Join(rpath, filepath.ToSlash(r)), true | return path.Join(rpath, filepath.ToSlash(r)), true | ||||
| } | } | ||||
| return filepath.ToSlash(r), true | |||||
| } | } | ||||
| return "", false | return "", false | ||||
| } | } | ||||
| @@ -832,8 +880,7 @@ func golistargs(cfg *Config, words []string) []string { | |||||
| fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), | fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), | ||||
| fmt.Sprintf("-test=%t", cfg.Tests), | fmt.Sprintf("-test=%t", cfg.Tests), | ||||
| fmt.Sprintf("-export=%t", usesExportData(cfg)), | fmt.Sprintf("-export=%t", usesExportData(cfg)), | ||||
| fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0 || | |||||
| cfg.Mode&NeedTypesInfo != 0), // Dependencies are required to do typechecking on sources, which is required for the TypesInfo. | |||||
| fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), | |||||
| // go list doesn't let you pass -test and -find together, | // go list doesn't let you pass -test and -find together, | ||||
| // probably because you'd just get the TestMain. | // probably because you'd just get the TestMain. | ||||
| fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), | fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), | ||||
| @@ -860,7 +907,7 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { | |||||
| cmd.Stdout = stdout | cmd.Stdout = stdout | ||||
| cmd.Stderr = stderr | cmd.Stderr = stderr | ||||
| defer func(start time.Time) { | defer func(start time.Time) { | ||||
| cfg.Logf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr) | |||||
| cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) | |||||
| }(time.Now()) | }(time.Now()) | ||||
| if err := cmd.Run(); err != nil { | if err := cmd.Run(); err != nil { | ||||
| @@ -897,7 +944,7 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { | |||||
| // (the Graphic characters without spaces) and may also exclude the | // (the Graphic characters without spaces) and may also exclude the | ||||
| // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. | // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. | ||||
| return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && | return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && | ||||
| strings.IndexRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) == -1 | |||||
| !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) | |||||
| } | } | ||||
| if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { | if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { | ||||
| if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { | if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { | ||||
| @@ -941,10 +988,31 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { | |||||
| return bytes.NewBufferString(output), nil | return bytes.NewBufferString(output), nil | ||||
| } | } | ||||
| // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a | |||||
| // directory outside any module. | |||||
| if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { | |||||
| output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, | |||||
| // TODO(matloob): command-line-arguments isn't correct here. | |||||
| "command-line-arguments", strings.Trim(stderr.String(), "\n")) | |||||
| return bytes.NewBufferString(output), nil | |||||
| } | |||||
| // Another variation of the previous error | |||||
| if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { | |||||
| output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, | |||||
| // TODO(matloob): command-line-arguments isn't correct here. | |||||
| "command-line-arguments", strings.Trim(stderr.String(), "\n")) | |||||
| return bytes.NewBufferString(output), nil | |||||
| } | |||||
| // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit | // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit | ||||
| // status if there's a dependency on a package that doesn't exist. But it should return | // status if there's a dependency on a package that doesn't exist. But it should return | ||||
| // a zero exit status and set an error on that package. | // a zero exit status and set an error on that package. | ||||
| if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { | if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { | ||||
| // Don't clobber stdout if `go list` actually returned something. | |||||
| if len(stdout.String()) > 0 { | |||||
| return stdout, nil | |||||
| } | |||||
| // try to extract package name from string | // try to extract package name from string | ||||
| stderrStr := stderr.String() | stderrStr := stderr.String() | ||||
| var importPath string | var importPath string | ||||
| @@ -978,12 +1046,6 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { | |||||
| if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { | if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { | ||||
| fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) | fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) | ||||
| } | } | ||||
| // debugging | |||||
| if false { | |||||
| fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout) | |||||
| } | |||||
| return stdout, nil | return stdout, nil | ||||
| } | } | ||||
| @@ -6,7 +6,6 @@ import ( | |||||
| "fmt" | "fmt" | ||||
| "go/parser" | "go/parser" | ||||
| "go/token" | "go/token" | ||||
| "path" | |||||
| "path/filepath" | "path/filepath" | ||||
| "strconv" | "strconv" | ||||
| "strings" | "strings" | ||||
| @@ -38,10 +37,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( | |||||
| for opath, contents := range cfg.Overlay { | for opath, contents := range cfg.Overlay { | ||||
| base := filepath.Base(opath) | base := filepath.Base(opath) | ||||
| dir := filepath.Dir(opath) | dir := filepath.Dir(opath) | ||||
| var pkg *Package | |||||
| var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant | |||||
| var testVariantOf *Package // if opath is a test file, this is the package it is testing | var testVariantOf *Package // if opath is a test file, this is the package it is testing | ||||
| var fileExists bool | var fileExists bool | ||||
| isTest := strings.HasSuffix(opath, "_test.go") | |||||
| isTestFile := strings.HasSuffix(opath, "_test.go") | |||||
| pkgName, ok := extractPackageName(opath, contents) | pkgName, ok := extractPackageName(opath, contents) | ||||
| if !ok { | if !ok { | ||||
| // Don't bother adding a file that doesn't even have a parsable package statement | // Don't bother adding a file that doesn't even have a parsable package statement | ||||
| @@ -57,13 +56,26 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( | |||||
| if !sameFile(filepath.Dir(f), dir) { | if !sameFile(filepath.Dir(f), dir) { | ||||
| continue | continue | ||||
| } | } | ||||
| if isTest && !hasTestFiles(p) { | |||||
| // Make sure to capture information on the package's test variant, if needed. | |||||
| if isTestFile && !hasTestFiles(p) { | |||||
| // TODO(matloob): Are there packages other than the 'production' variant | // TODO(matloob): Are there packages other than the 'production' variant | ||||
| // of a package that this can match? This shouldn't match the test main package | // of a package that this can match? This shouldn't match the test main package | ||||
| // because the file is generated in another directory. | // because the file is generated in another directory. | ||||
| testVariantOf = p | testVariantOf = p | ||||
| continue nextPackage | continue nextPackage | ||||
| } | } | ||||
| if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { | |||||
| // If we've already seen the test variant, | |||||
| // make sure to label which package it is a test variant of. | |||||
| if hasTestFiles(pkg) { | |||||
| testVariantOf = p | |||||
| continue nextPackage | |||||
| } | |||||
| // If we have already seen the package of which this is a test variant. | |||||
| if hasTestFiles(p) { | |||||
| testVariantOf = pkg | |||||
| } | |||||
| } | |||||
| pkg = p | pkg = p | ||||
| if filepath.Base(f) == base { | if filepath.Base(f) == base { | ||||
| fileExists = true | fileExists = true | ||||
| @@ -74,32 +86,16 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( | |||||
| if pkg == nil { | if pkg == nil { | ||||
| // Try to find the module or gopath dir the file is contained in. | // Try to find the module or gopath dir the file is contained in. | ||||
| // Then for modules, add the module opath to the beginning. | // Then for modules, add the module opath to the beginning. | ||||
| var pkgPath string | |||||
| for rdir, rpath := range rootDirs().rootDirs { | |||||
| // TODO(matloob): This doesn't properly handle symlinks. | |||||
| r, err := filepath.Rel(rdir, dir) | |||||
| if err != nil { | |||||
| continue | |||||
| } | |||||
| pkgPath = filepath.ToSlash(r) | |||||
| if rpath != "" { | |||||
| pkgPath = path.Join(rpath, pkgPath) | |||||
| } | |||||
| // We only create one new package even it can belong in multiple modules or GOPATH entries. | |||||
| // This is okay because tools (such as the LSP) that use overlays will recompute the overlay | |||||
| // once the file is saved, and golist will do the right thing. | |||||
| // TODO(matloob): Implement module tiebreaking? | |||||
| pkgPath, ok := getPkgPath(cfg, dir, rootDirs) | |||||
| if !ok { | |||||
| break | break | ||||
| } | } | ||||
| if pkgPath == "" { | |||||
| continue | |||||
| } | |||||
| isXTest := strings.HasSuffix(pkgName, "_test") | isXTest := strings.HasSuffix(pkgName, "_test") | ||||
| if isXTest { | if isXTest { | ||||
| pkgPath += "_test" | pkgPath += "_test" | ||||
| } | } | ||||
| id := pkgPath | id := pkgPath | ||||
| if isTest && !isXTest { | |||||
| if isTestFile && !isXTest { | |||||
| id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) | id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) | ||||
| } | } | ||||
| // Try to reclaim a package with the same id if it exists in the response. | // Try to reclaim a package with the same id if it exists in the response. | ||||
| @@ -115,7 +111,7 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( | |||||
| response.addPackage(pkg) | response.addPackage(pkg) | ||||
| havePkgs[pkg.PkgPath] = id | havePkgs[pkg.PkgPath] = id | ||||
| // Add the production package's sources for a test variant. | // Add the production package's sources for a test variant. | ||||
| if isTest && !isXTest && testVariantOf != nil { | |||||
| if isTestFile && !isXTest && testVariantOf != nil { | |||||
| pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) | pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) | ||||
| pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) | pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) | ||||
| } | } | ||||
| @@ -138,12 +134,16 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( | |||||
| if !found { | if !found { | ||||
| overlayAddsImports = true | overlayAddsImports = true | ||||
| // TODO(matloob): Handle cases when the following block isn't correct. | // TODO(matloob): Handle cases when the following block isn't correct. | ||||
| // These include imports of test variants, imports of vendored packages, etc. | |||||
| // These include imports of vendored packages, etc. | |||||
| id, ok := havePkgs[imp] | id, ok := havePkgs[imp] | ||||
| if !ok { | if !ok { | ||||
| id = imp | id = imp | ||||
| } | } | ||||
| pkg.Imports[imp] = &Package{ID: id} | pkg.Imports[imp] = &Package{ID: id} | ||||
| // Add dependencies to the non-test variant version of this package as wel. | |||||
| if testVariantOf != nil { | |||||
| testVariantOf.Imports[imp] = &Package{ID: id} | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| continue | continue | ||||
| @@ -0,0 +1,57 @@ | |||||
| // Copyright 2019 The Go Authors. All rights reserved. | |||||
| // Use of this source code is governed by a BSD-style | |||||
| // license that can be found in the LICENSE file. | |||||
| package packages | |||||
| import ( | |||||
| "fmt" | |||||
| "strings" | |||||
| ) | |||||
| var allModes = []LoadMode{ | |||||
| NeedName, | |||||
| NeedFiles, | |||||
| NeedCompiledGoFiles, | |||||
| NeedImports, | |||||
| NeedDeps, | |||||
| NeedExportsFile, | |||||
| NeedTypes, | |||||
| NeedSyntax, | |||||
| NeedTypesInfo, | |||||
| NeedTypesSizes, | |||||
| } | |||||
| var modeStrings = []string{ | |||||
| "NeedName", | |||||
| "NeedFiles", | |||||
| "NeedCompiledGoFiles", | |||||
| "NeedImports", | |||||
| "NeedDeps", | |||||
| "NeedExportsFile", | |||||
| "NeedTypes", | |||||
| "NeedSyntax", | |||||
| "NeedTypesInfo", | |||||
| "NeedTypesSizes", | |||||
| } | |||||
| func (mod LoadMode) String() string { | |||||
| m := mod | |||||
| if m == 0 { | |||||
| return fmt.Sprintf("LoadMode(0)") | |||||
| } | |||||
| var out []string | |||||
| for i, x := range allModes { | |||||
| if x > m { | |||||
| break | |||||
| } | |||||
| if (m & x) != 0 { | |||||
| out = append(out, modeStrings[i]) | |||||
| m = m ^ x | |||||
| } | |||||
| } | |||||
| if m != 0 { | |||||
| out = append(out, "Unknown") | |||||
| } | |||||
| return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) | |||||
| } | |||||
| @@ -48,8 +48,7 @@ const ( | |||||
| // "placeholder" Packages with only the ID set. | // "placeholder" Packages with only the ID set. | ||||
| NeedImports | NeedImports | ||||
| // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports | |||||
| // is not set NeedDeps has no effect. | |||||
| // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. | |||||
| NeedDeps | NeedDeps | ||||
| // NeedExportsFile adds ExportsFile. | // NeedExportsFile adds ExportsFile. | ||||
| @@ -75,7 +74,7 @@ const ( | |||||
| // Deprecated: LoadImports exists for historical compatibility | // Deprecated: LoadImports exists for historical compatibility | ||||
| // and should not be used. Please directly specify the needed fields using the Need values. | // and should not be used. Please directly specify the needed fields using the Need values. | ||||
| LoadImports = LoadFiles | NeedImports | NeedDeps | |||||
| LoadImports = LoadFiles | NeedImports | |||||
| // Deprecated: LoadTypes exists for historical compatibility | // Deprecated: LoadTypes exists for historical compatibility | ||||
| // and should not be used. Please directly specify the needed fields using the Need values. | // and should not be used. Please directly specify the needed fields using the Need values. | ||||
| @@ -87,7 +86,7 @@ const ( | |||||
| // Deprecated: LoadAllSyntax exists for historical compatibility | // Deprecated: LoadAllSyntax exists for historical compatibility | ||||
| // and should not be used. Please directly specify the needed fields using the Need values. | // and should not be used. Please directly specify the needed fields using the Need values. | ||||
| LoadAllSyntax = LoadSyntax | |||||
| LoadAllSyntax = LoadSyntax | NeedDeps | |||||
| ) | ) | ||||
| // A Config specifies details about how packages should be loaded. | // A Config specifies details about how packages should be loaded. | ||||
| @@ -416,11 +415,13 @@ type loader struct { | |||||
| parseCacheMu sync.Mutex | parseCacheMu sync.Mutex | ||||
| exportMu sync.Mutex // enforces mutual exclusion of exportdata operations | exportMu sync.Mutex // enforces mutual exclusion of exportdata operations | ||||
| // TODO(matloob): Add an implied mode here and use that instead of mode. | |||||
| // Implied mode would contain all the fields we need the data for so we can | |||||
| // get the actually requested fields. We'll zero them out before returning | |||||
| // packages to the user. This will make it easier for us to get the conditions | |||||
| // where we need certain modes right. | |||||
| // Config.Mode contains the implied mode (see impliedLoadMode). | |||||
| // Implied mode contains all the fields we need the data for. | |||||
| // In requestedMode there are the actually requested fields. | |||||
| // We'll zero them out before returning packages to the user. | |||||
| // This makes it easier for us to get the conditions where | |||||
| // we need certain modes right. | |||||
| requestedMode LoadMode | |||||
| } | } | ||||
| type parseValue struct { | type parseValue struct { | ||||
| @@ -462,7 +463,11 @@ func newLoader(cfg *Config) *loader { | |||||
| } | } | ||||
| } | } | ||||
| if ld.Mode&NeedTypes != 0 { | |||||
| // Save the actually requested fields. We'll zero them out before returning packages to the user. | |||||
| ld.requestedMode = ld.Mode | |||||
| ld.Mode = impliedLoadMode(ld.Mode) | |||||
| if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { | |||||
| if ld.Fset == nil { | if ld.Fset == nil { | ||||
| ld.Fset = token.NewFileSet() | ld.Fset = token.NewFileSet() | ||||
| } | } | ||||
| @@ -476,6 +481,7 @@ func newLoader(cfg *Config) *loader { | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| return ld | return ld | ||||
| } | } | ||||
| @@ -496,7 +502,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { | |||||
| } | } | ||||
| lpkg := &loaderPackage{ | lpkg := &loaderPackage{ | ||||
| Package: pkg, | Package: pkg, | ||||
| needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0, | |||||
| needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0, | |||||
| needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 || | needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 || | ||||
| len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files | len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files | ||||
| pkg.ExportFile == "" && pkg.PkgPath != "unsafe", | pkg.ExportFile == "" && pkg.PkgPath != "unsafe", | ||||
| @@ -544,10 +550,8 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { | |||||
| lpkg.color = grey | lpkg.color = grey | ||||
| stack = append(stack, lpkg) // push | stack = append(stack, lpkg) // push | ||||
| stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports | stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports | ||||
| // If NeedTypesInfo we need dependencies (at least for the roots) to typecheck the package. | |||||
| // If NeedImports isn't set, the imports fields will all be zeroed out. | // If NeedImports isn't set, the imports fields will all be zeroed out. | ||||
| // If NeedDeps isn't also set we want to keep the stubs. | |||||
| if ld.Mode&NeedTypesInfo != 0 || (ld.Mode&NeedImports != 0 && ld.Mode&NeedDeps != 0) { | |||||
| if ld.Mode&NeedImports != 0 { | |||||
| lpkg.Imports = make(map[string]*Package, len(stubs)) | lpkg.Imports = make(map[string]*Package, len(stubs)) | ||||
| for importPath, ipkg := range stubs { | for importPath, ipkg := range stubs { | ||||
| var importErr error | var importErr error | ||||
| @@ -566,11 +570,8 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { | |||||
| continue | continue | ||||
| } | } | ||||
| // If !NeedDeps, just fill Imports for the root. No need to recurse further. | |||||
| if ld.Mode&NeedDeps != 0 { | |||||
| if visit(imp) { | |||||
| lpkg.needsrc = true | |||||
| } | |||||
| if visit(imp) { | |||||
| lpkg.needsrc = true | |||||
| } | } | ||||
| lpkg.Imports[importPath] = imp.Package | lpkg.Imports[importPath] = imp.Package | ||||
| } | } | ||||
| @@ -587,7 +588,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { | |||||
| return lpkg.needsrc | return lpkg.needsrc | ||||
| } | } | ||||
| if ld.Mode&(NeedImports|NeedDeps|NeedTypesInfo) == 0 { | |||||
| if ld.Mode&NeedImports == 0 { | |||||
| // We do this to drop the stub import packages that we are not even going to try to resolve. | // We do this to drop the stub import packages that we are not even going to try to resolve. | ||||
| for _, lpkg := range initial { | for _, lpkg := range initial { | ||||
| lpkg.Imports = nil | lpkg.Imports = nil | ||||
| @@ -598,7 +599,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { | |||||
| visit(lpkg) | visit(lpkg) | ||||
| } | } | ||||
| } | } | ||||
| if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right? | |||||
| if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { | |||||
| for _, lpkg := range srcPkgs { | for _, lpkg := range srcPkgs { | ||||
| // Complete type information is required for the | // Complete type information is required for the | ||||
| // immediate dependencies of each source package. | // immediate dependencies of each source package. | ||||
| @@ -608,9 +609,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| // Load type data if needed, starting at | |||||
| // Load type data and syntax if needed, starting at | |||||
| // the initial packages (roots of the import DAG). | // the initial packages (roots of the import DAG). | ||||
| if ld.Mode&NeedTypes != 0 { | |||||
| if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { | |||||
| var wg sync.WaitGroup | var wg sync.WaitGroup | ||||
| for _, lpkg := range initial { | for _, lpkg := range initial { | ||||
| wg.Add(1) | wg.Add(1) | ||||
| @@ -623,54 +624,44 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { | |||||
| } | } | ||||
| result := make([]*Package, len(initial)) | result := make([]*Package, len(initial)) | ||||
| importPlaceholders := make(map[string]*Package) | |||||
| for i, lpkg := range initial { | for i, lpkg := range initial { | ||||
| result[i] = lpkg.Package | result[i] = lpkg.Package | ||||
| } | } | ||||
| for i := range ld.pkgs { | for i := range ld.pkgs { | ||||
| // Clear all unrequested fields, for extra de-Hyrum-ization. | // Clear all unrequested fields, for extra de-Hyrum-ization. | ||||
| if ld.Mode&NeedName == 0 { | |||||
| if ld.requestedMode&NeedName == 0 { | |||||
| ld.pkgs[i].Name = "" | ld.pkgs[i].Name = "" | ||||
| ld.pkgs[i].PkgPath = "" | ld.pkgs[i].PkgPath = "" | ||||
| } | } | ||||
| if ld.Mode&NeedFiles == 0 { | |||||
| if ld.requestedMode&NeedFiles == 0 { | |||||
| ld.pkgs[i].GoFiles = nil | ld.pkgs[i].GoFiles = nil | ||||
| ld.pkgs[i].OtherFiles = nil | ld.pkgs[i].OtherFiles = nil | ||||
| } | } | ||||
| if ld.Mode&NeedCompiledGoFiles == 0 { | |||||
| if ld.requestedMode&NeedCompiledGoFiles == 0 { | |||||
| ld.pkgs[i].CompiledGoFiles = nil | ld.pkgs[i].CompiledGoFiles = nil | ||||
| } | } | ||||
| if ld.Mode&NeedImports == 0 { | |||||
| if ld.requestedMode&NeedImports == 0 { | |||||
| ld.pkgs[i].Imports = nil | ld.pkgs[i].Imports = nil | ||||
| } | } | ||||
| if ld.Mode&NeedExportsFile == 0 { | |||||
| if ld.requestedMode&NeedExportsFile == 0 { | |||||
| ld.pkgs[i].ExportFile = "" | ld.pkgs[i].ExportFile = "" | ||||
| } | } | ||||
| if ld.Mode&NeedTypes == 0 { | |||||
| if ld.requestedMode&NeedTypes == 0 { | |||||
| ld.pkgs[i].Types = nil | ld.pkgs[i].Types = nil | ||||
| ld.pkgs[i].Fset = nil | ld.pkgs[i].Fset = nil | ||||
| ld.pkgs[i].IllTyped = false | ld.pkgs[i].IllTyped = false | ||||
| } | } | ||||
| if ld.Mode&NeedSyntax == 0 { | |||||
| if ld.requestedMode&NeedSyntax == 0 { | |||||
| ld.pkgs[i].Syntax = nil | ld.pkgs[i].Syntax = nil | ||||
| } | } | ||||
| if ld.Mode&NeedTypesInfo == 0 { | |||||
| if ld.requestedMode&NeedTypesInfo == 0 { | |||||
| ld.pkgs[i].TypesInfo = nil | ld.pkgs[i].TypesInfo = nil | ||||
| } | } | ||||
| if ld.Mode&NeedTypesSizes == 0 { | |||||
| if ld.requestedMode&NeedTypesSizes == 0 { | |||||
| ld.pkgs[i].TypesSizes = nil | ld.pkgs[i].TypesSizes = nil | ||||
| } | } | ||||
| if ld.Mode&NeedDeps == 0 { | |||||
| for j, pkg := range ld.pkgs[i].Imports { | |||||
| ph, ok := importPlaceholders[pkg.ID] | |||||
| if !ok { | |||||
| ph = &Package{ID: pkg.ID} | |||||
| importPlaceholders[pkg.ID] = ph | |||||
| } | |||||
| ld.pkgs[i].Imports[j] = ph | |||||
| } | |||||
| } | |||||
| } | } | ||||
| return result, nil | return result, nil | ||||
| } | } | ||||
| @@ -691,7 +682,6 @@ func (ld *loader) loadRecursive(lpkg *loaderPackage) { | |||||
| }(imp) | }(imp) | ||||
| } | } | ||||
| wg.Wait() | wg.Wait() | ||||
| ld.loadPackage(lpkg) | ld.loadPackage(lpkg) | ||||
| }) | }) | ||||
| } | } | ||||
| @@ -780,12 +770,23 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { | |||||
| lpkg.Errors = append(lpkg.Errors, errs...) | lpkg.Errors = append(lpkg.Errors, errs...) | ||||
| } | } | ||||
| if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { | |||||
| // The config requested loading sources and types, but sources are missing. | |||||
| // Add an error to the package and fall back to loading from export data. | |||||
| appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) | |||||
| ld.loadFromExportData(lpkg) | |||||
| return // can't get syntax trees for this package | |||||
| } | |||||
| files, errs := ld.parseFiles(lpkg.CompiledGoFiles) | files, errs := ld.parseFiles(lpkg.CompiledGoFiles) | ||||
| for _, err := range errs { | for _, err := range errs { | ||||
| appendError(err) | appendError(err) | ||||
| } | } | ||||
| lpkg.Syntax = files | lpkg.Syntax = files | ||||
| if ld.Config.Mode&NeedTypes == 0 { | |||||
| return | |||||
| } | |||||
| lpkg.TypesInfo = &types.Info{ | lpkg.TypesInfo = &types.Info{ | ||||
| Types: make(map[ast.Expr]types.TypeAndValue), | Types: make(map[ast.Expr]types.TypeAndValue), | ||||
| @@ -818,7 +819,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { | |||||
| if ipkg.Types != nil && ipkg.Types.Complete() { | if ipkg.Types != nil && ipkg.Types.Complete() { | ||||
| return ipkg.Types, nil | return ipkg.Types, nil | ||||
| } | } | ||||
| log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg) | |||||
| log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) | |||||
| panic("unreachable") | panic("unreachable") | ||||
| }) | }) | ||||
| @@ -829,7 +830,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { | |||||
| // Type-check bodies of functions only in non-initial packages. | // Type-check bodies of functions only in non-initial packages. | ||||
| // Example: for import graph A->B->C and initial packages {A,C}, | // Example: for import graph A->B->C and initial packages {A,C}, | ||||
| // we can ignore function bodies in B. | // we can ignore function bodies in B. | ||||
| IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial, | |||||
| IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, | |||||
| Error: appendError, | Error: appendError, | ||||
| Sizes: ld.sizes, | Sizes: ld.sizes, | ||||
| @@ -1091,10 +1092,25 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error | |||||
| return tpkg, nil | return tpkg, nil | ||||
| } | } | ||||
| // impliedLoadMode returns loadMode with its dependencies. | |||||
| func impliedLoadMode(loadMode LoadMode) LoadMode { | |||||
| if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { | |||||
| // If NeedTypesInfo, go/packages needs to do typechecking itself so it can | |||||
| // associate type info with the AST. To do so, we need the export data | |||||
| // for dependencies, which means we need to ask for the direct dependencies. | |||||
| // NeedImports is used to ask for the direct dependencies. | |||||
| loadMode |= NeedImports | |||||
| } | |||||
| if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { | |||||
| // With NeedDeps we need to load at least direct dependencies. | |||||
| // NeedImports is used to ask for the direct dependencies. | |||||
| loadMode |= NeedImports | |||||
| } | |||||
| return loadMode | |||||
| } | |||||
| func usesExportData(cfg *Config) bool { | func usesExportData(cfg *Config) bool { | ||||
| return cfg.Mode&NeedExportsFile != 0 || | |||||
| // If NeedTypes but not NeedTypesInfo we won't typecheck using sources, so we need export data. | |||||
| (cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0) || | |||||
| // If NeedTypesInfo but not NeedDeps, we're typechecking a package using its sources plus its dependencies' export data | |||||
| (cfg.Mode&NeedTypesInfo != 0 && cfg.Mode&NeedDeps == 0) | |||||
| return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 | |||||
| } | } | ||||
| @@ -16,6 +16,7 @@ import ( | |||||
| "os" | "os" | ||||
| "path/filepath" | "path/filepath" | ||||
| "strings" | "strings" | ||||
| "time" | |||||
| "golang.org/x/tools/internal/fastwalk" | "golang.org/x/tools/internal/fastwalk" | ||||
| ) | ) | ||||
| @@ -83,8 +84,9 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) | |||||
| } | } | ||||
| return | return | ||||
| } | } | ||||
| start := time.Now() | |||||
| if opts.Debug { | if opts.Debug { | ||||
| log.Printf("scanning %s", root.Path) | |||||
| log.Printf("gopathwalk: scanning %s", root.Path) | |||||
| } | } | ||||
| w := &walker{ | w := &walker{ | ||||
| root: root, | root: root, | ||||
| @@ -98,7 +100,7 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) | |||||
| } | } | ||||
| if opts.Debug { | if opts.Debug { | ||||
| log.Printf("scanned %s", root.Path) | |||||
| log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) | |||||
| } | } | ||||
| } | } | ||||
| @@ -17,6 +17,7 @@ import ( | |||||
| "os/exec" | "os/exec" | ||||
| "path" | "path" | ||||
| "path/filepath" | "path/filepath" | ||||
| "reflect" | |||||
| "sort" | "sort" | ||||
| "strconv" | "strconv" | ||||
| "strings" | "strings" | ||||
| @@ -301,7 +302,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { | |||||
| if known != nil && known.name != "" { | if known != nil && known.name != "" { | ||||
| return known.name | return known.name | ||||
| } | } | ||||
| return importPathToAssumedName(imp.ImportPath) | |||||
| return ImportPathToAssumedName(imp.ImportPath) | |||||
| } | } | ||||
| // load reads in everything necessary to run a pass, and reports whether the | // load reads in everything necessary to run a pass, and reports whether the | ||||
| @@ -434,7 +435,7 @@ func (p *pass) importSpecName(imp *ImportInfo) string { | |||||
| } | } | ||||
| ident := p.importIdentifier(imp) | ident := p.importIdentifier(imp) | ||||
| if ident == importPathToAssumedName(imp.ImportPath) { | |||||
| if ident == ImportPathToAssumedName(imp.ImportPath) { | |||||
| return "" // ident not needed since the assumed and real names are the same. | return "" // ident not needed since the assumed and real names are the same. | ||||
| } | } | ||||
| return ident | return ident | ||||
| @@ -475,9 +476,9 @@ func (p *pass) assumeSiblingImportsValid() { | |||||
| } | } | ||||
| for left, rights := range refs { | for left, rights := range refs { | ||||
| if imp, ok := importsByName[left]; ok { | if imp, ok := importsByName[left]; ok { | ||||
| if _, ok := stdlib[imp.ImportPath]; ok { | |||||
| if m, ok := stdlib[imp.ImportPath]; ok { | |||||
| // We have the stdlib in memory; no need to guess. | // We have the stdlib in memory; no need to guess. | ||||
| rights = stdlib[imp.ImportPath] | |||||
| rights = copyExports(m) | |||||
| } | } | ||||
| p.addCandidate(imp, &packageInfo{ | p.addCandidate(imp, &packageInfo{ | ||||
| // no name; we already know it. | // no name; we already know it. | ||||
| @@ -584,29 +585,133 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv | |||||
| return fixes, nil | return fixes, nil | ||||
| } | } | ||||
| // getCandidatePkgs returns the list of pkgs that are accessible from filename, | |||||
| // optionall filtered to only packages named pkgName. | |||||
| func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) { | |||||
| // TODO(heschi): filter out current package. (Don't forget x_test can import x.) | |||||
| var result []*pkg | |||||
| // Start off with the standard library. | |||||
| for importPath := range stdlib { | |||||
| if pkgName != "" && path.Base(importPath) != pkgName { | |||||
| continue | |||||
| } | |||||
| result = append(result, &pkg{ | |||||
| dir: filepath.Join(env.GOROOT, "src", importPath), | |||||
| importPathShort: importPath, | |||||
| packageName: path.Base(importPath), | |||||
| relevance: 0, | |||||
| }) | |||||
| } | |||||
| // Exclude goroot results -- getting them is relatively expensive, not cached, | |||||
| // and generally redundant with the in-memory version. | |||||
| exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT} | |||||
| // Only the go/packages resolver uses the first argument, and nobody uses that resolver. | |||||
| scannedPkgs, err := env.GetResolver().scan(nil, true, exclude) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| dupCheck := map[string]struct{}{} | |||||
| for _, pkg := range scannedPkgs { | |||||
| if pkgName != "" && pkg.packageName != pkgName { | |||||
| continue | |||||
| } | |||||
| if !canUse(filename, pkg.dir) { | |||||
| continue | |||||
| } | |||||
| if _, ok := dupCheck[pkg.importPathShort]; ok { | |||||
| continue | |||||
| } | |||||
| dupCheck[pkg.importPathShort] = struct{}{} | |||||
| result = append(result, pkg) | |||||
| } | |||||
| // Sort first by relevance, then by package name, with import path as a tiebreaker. | |||||
| sort.Slice(result, func(i, j int) bool { | |||||
| pi, pj := result[i], result[j] | |||||
| if pi.relevance != pj.relevance { | |||||
| return pi.relevance < pj.relevance | |||||
| } | |||||
| if pi.packageName != pj.packageName { | |||||
| return pi.packageName < pj.packageName | |||||
| } | |||||
| return pi.importPathShort < pj.importPathShort | |||||
| }) | |||||
| return result, nil | |||||
| } | |||||
| func candidateImportName(pkg *pkg) string { | |||||
| if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { | |||||
| return pkg.packageName | |||||
| } | |||||
| return "" | |||||
| } | |||||
| // getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. | // getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. | ||||
| func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) { | func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) { | ||||
| // TODO(suzmue): scan for additional candidates and filter out | |||||
| // current package. | |||||
| pkgs, err := getCandidatePkgs("", filename, env) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| result := make([]ImportFix, 0, len(pkgs)) | |||||
| for _, pkg := range pkgs { | |||||
| result = append(result, ImportFix{ | |||||
| StmtInfo: ImportInfo{ | |||||
| ImportPath: pkg.importPathShort, | |||||
| Name: candidateImportName(pkg), | |||||
| }, | |||||
| IdentName: pkg.packageName, | |||||
| FixType: AddImport, | |||||
| }) | |||||
| } | |||||
| return result, nil | |||||
| } | |||||
| // Get the stdlib candidates and sort by import path. | |||||
| var paths []string | |||||
| for importPath := range stdlib { | |||||
| paths = append(paths, importPath) | |||||
| // A PackageExport is a package and its exports. | |||||
| type PackageExport struct { | |||||
| Fix *ImportFix | |||||
| Exports []string | |||||
| } | |||||
| func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) { | |||||
| pkgs, err := getCandidatePkgs(completePackage, filename, env) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | } | ||||
| sort.Strings(paths) | |||||
| var imports []ImportFix | |||||
| for _, importPath := range paths { | |||||
| imports = append(imports, ImportFix{ | |||||
| results := make([]PackageExport, 0, len(pkgs)) | |||||
| for _, pkg := range pkgs { | |||||
| fix := &ImportFix{ | |||||
| StmtInfo: ImportInfo{ | StmtInfo: ImportInfo{ | ||||
| ImportPath: importPath, | |||||
| ImportPath: pkg.importPathShort, | |||||
| Name: candidateImportName(pkg), | |||||
| }, | }, | ||||
| IdentName: path.Base(importPath), | |||||
| IdentName: pkg.packageName, | |||||
| FixType: AddImport, | FixType: AddImport, | ||||
| } | |||||
| var exports []string | |||||
| if e, ok := stdlib[pkg.importPathShort]; ok { | |||||
| exports = e | |||||
| } else { | |||||
| exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg) | |||||
| if err != nil { | |||||
| if env.Debug { | |||||
| env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err) | |||||
| } | |||||
| continue | |||||
| } | |||||
| } | |||||
| sort.Strings(exports) | |||||
| results = append(results, PackageExport{ | |||||
| Fix: fix, | |||||
| Exports: exports, | |||||
| }) | }) | ||||
| } | } | ||||
| return imports, nil | |||||
| return results, nil | |||||
| } | } | ||||
| // ProcessEnv contains environment variables and settings that affect the use of | // ProcessEnv contains environment variables and settings that affect the use of | ||||
| @@ -678,6 +783,13 @@ func (e *ProcessEnv) buildContext() *build.Context { | |||||
| ctx := build.Default | ctx := build.Default | ||||
| ctx.GOROOT = e.GOROOT | ctx.GOROOT = e.GOROOT | ||||
| ctx.GOPATH = e.GOPATH | ctx.GOPATH = e.GOPATH | ||||
| // As of Go 1.14, build.Context has a WorkingDir field | |||||
| // (see golang.org/issue/34860). | |||||
| // Populate it only if present. | |||||
| if wd := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); wd.IsValid() && wd.Kind() == reflect.String { | |||||
| wd.SetString(e.WorkingDir) | |||||
| } | |||||
| return &ctx | return &ctx | ||||
| } | } | ||||
| @@ -712,9 +824,10 @@ func cmdDebugStr(cmd *exec.Cmd) string { | |||||
| func addStdlibCandidates(pass *pass, refs references) { | func addStdlibCandidates(pass *pass, refs references) { | ||||
| add := func(pkg string) { | add := func(pkg string) { | ||||
| exports := copyExports(stdlib[pkg]) | |||||
| pass.addCandidate( | pass.addCandidate( | ||||
| &ImportInfo{ImportPath: pkg}, | &ImportInfo{ImportPath: pkg}, | ||||
| &packageInfo{name: path.Base(pkg), exports: stdlib[pkg]}) | |||||
| &packageInfo{name: path.Base(pkg), exports: exports}) | |||||
| } | } | ||||
| for left := range refs { | for left := range refs { | ||||
| if left == "rand" { | if left == "rand" { | ||||
| @@ -735,12 +848,15 @@ func addStdlibCandidates(pass *pass, refs references) { | |||||
| type Resolver interface { | type Resolver interface { | ||||
| // loadPackageNames loads the package names in importPaths. | // loadPackageNames loads the package names in importPaths. | ||||
| loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) | loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) | ||||
| // scan finds (at least) the packages satisfying refs. The returned slice is unordered. | |||||
| scan(refs references) ([]*pkg, error) | |||||
| // scan finds (at least) the packages satisfying refs. If loadNames is true, | |||||
| // package names will be set on the results, and dirs whose package name | |||||
| // could not be determined will be excluded. | |||||
| scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) | |||||
| // loadExports returns the set of exported symbols in the package at dir. | // loadExports returns the set of exported symbols in the package at dir. | ||||
| // It returns an error if the package name in dir does not match expectPackage. | |||||
| // loadExports may be called concurrently. | // loadExports may be called concurrently. | ||||
| loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) | |||||
| loadExports(ctx context.Context, pkg *pkg) (string, []string, error) | |||||
| ClearForNewScan() | |||||
| } | } | ||||
| // gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages. | // gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages. | ||||
| @@ -748,6 +864,8 @@ type goPackagesResolver struct { | |||||
| env *ProcessEnv | env *ProcessEnv | ||||
| } | } | ||||
| func (r *goPackagesResolver) ClearForNewScan() {} | |||||
| func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { | func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { | ||||
| if len(importPaths) == 0 { | if len(importPaths) == 0 { | ||||
| return nil, nil | return nil, nil | ||||
| @@ -766,13 +884,13 @@ func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir strin | |||||
| if _, ok := names[path]; ok { | if _, ok := names[path]; ok { | ||||
| continue | continue | ||||
| } | } | ||||
| names[path] = importPathToAssumedName(path) | |||||
| names[path] = ImportPathToAssumedName(path) | |||||
| } | } | ||||
| return names, nil | return names, nil | ||||
| } | } | ||||
| func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) { | |||||
| func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) { | |||||
| var loadQueries []string | var loadQueries []string | ||||
| for pkgName := range refs { | for pkgName := range refs { | ||||
| loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) | loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) | ||||
| @@ -790,33 +908,34 @@ func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) { | |||||
| dir: filepath.Dir(goPackage.CompiledGoFiles[0]), | dir: filepath.Dir(goPackage.CompiledGoFiles[0]), | ||||
| importPathShort: VendorlessPath(goPackage.PkgPath), | importPathShort: VendorlessPath(goPackage.PkgPath), | ||||
| goPackage: goPackage, | goPackage: goPackage, | ||||
| packageName: goPackage.Name, | |||||
| }) | }) | ||||
| } | } | ||||
| return scan, nil | return scan, nil | ||||
| } | } | ||||
| func (r *goPackagesResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) { | |||||
| func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { | |||||
| if pkg.goPackage == nil { | if pkg.goPackage == nil { | ||||
| return nil, fmt.Errorf("goPackage not set") | |||||
| return "", nil, fmt.Errorf("goPackage not set") | |||||
| } | } | ||||
| exports := map[string]bool{} | |||||
| var exports []string | |||||
| fset := token.NewFileSet() | fset := token.NewFileSet() | ||||
| for _, fname := range pkg.goPackage.CompiledGoFiles { | for _, fname := range pkg.goPackage.CompiledGoFiles { | ||||
| f, err := parser.ParseFile(fset, fname, nil, 0) | f, err := parser.ParseFile(fset, fname, nil, 0) | ||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("parsing %s: %v", fname, err) | |||||
| return "", nil, fmt.Errorf("parsing %s: %v", fname, err) | |||||
| } | } | ||||
| for name := range f.Scope.Objects { | for name := range f.Scope.Objects { | ||||
| if ast.IsExported(name) { | if ast.IsExported(name) { | ||||
| exports[name] = true | |||||
| exports = append(exports, name) | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| return exports, nil | |||||
| return pkg.goPackage.Name, exports, nil | |||||
| } | } | ||||
| func addExternalCandidates(pass *pass, refs references, filename string) error { | func addExternalCandidates(pass *pass, refs references, filename string) error { | ||||
| dirScan, err := pass.env.GetResolver().scan(refs) | |||||
| dirScan, err := pass.env.GetResolver().scan(refs, false, nil) | |||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| } | } | ||||
| @@ -887,7 +1006,7 @@ func notIdentifier(ch rune) bool { | |||||
| ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) | ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) | ||||
| } | } | ||||
| // importPathToAssumedName returns the assumed package name of an import path. | |||||
| // ImportPathToAssumedName returns the assumed package name of an import path. | |||||
| // It does this using only string parsing of the import path. | // It does this using only string parsing of the import path. | ||||
| // It picks the last element of the path that does not look like a major | // It picks the last element of the path that does not look like a major | ||||
| // version, and then picks the valid identifier off the start of that element. | // version, and then picks the valid identifier off the start of that element. | ||||
| @@ -895,7 +1014,7 @@ func notIdentifier(ch rune) bool { | |||||
| // clarity. | // clarity. | ||||
| // This function could be moved to a standard package and exported if we want | // This function could be moved to a standard package and exported if we want | ||||
| // for use in other tools. | // for use in other tools. | ||||
| func importPathToAssumedName(importPath string) string { | |||||
| func ImportPathToAssumedName(importPath string) string { | |||||
| base := path.Base(importPath) | base := path.Base(importPath) | ||||
| if strings.HasPrefix(base, "v") { | if strings.HasPrefix(base, "v") { | ||||
| if _, err := strconv.Atoi(base[1:]); err == nil { | if _, err := strconv.Atoi(base[1:]); err == nil { | ||||
| @@ -914,10 +1033,24 @@ func importPathToAssumedName(importPath string) string { | |||||
| // gopathResolver implements resolver for GOPATH workspaces. | // gopathResolver implements resolver for GOPATH workspaces. | ||||
| type gopathResolver struct { | type gopathResolver struct { | ||||
| env *ProcessEnv | |||||
| env *ProcessEnv | |||||
| cache *dirInfoCache | |||||
| } | |||||
| func (r *gopathResolver) init() { | |||||
| if r.cache == nil { | |||||
| r.cache = &dirInfoCache{ | |||||
| dirs: map[string]*directoryPackageInfo{}, | |||||
| } | |||||
| } | |||||
| } | |||||
| func (r *gopathResolver) ClearForNewScan() { | |||||
| r.cache = nil | |||||
| } | } | ||||
| func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { | func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { | ||||
| r.init() | |||||
| names := map[string]string{} | names := map[string]string{} | ||||
| for _, path := range importPaths { | for _, path := range importPaths { | ||||
| names[path] = importPathToName(r.env, path, srcDir) | names[path] = importPathToName(r.env, path, srcDir) | ||||
| @@ -1000,6 +1133,8 @@ type pkg struct { | |||||
| goPackage *packages.Package | goPackage *packages.Package | ||||
| dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") | dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") | ||||
| importPathShort string // vendorless import path ("net/http", "a/b") | importPathShort string // vendorless import path ("net/http", "a/b") | ||||
| packageName string // package name loaded from source if requested | |||||
| relevance int // a weakly-defined score of how relevant a package is. 0 is most relevant. | |||||
| } | } | ||||
| type pkgDistance struct { | type pkgDistance struct { | ||||
| @@ -1043,32 +1178,74 @@ func distance(basepath, targetpath string) int { | |||||
| return strings.Count(p, string(filepath.Separator)) + 1 | return strings.Count(p, string(filepath.Separator)) + 1 | ||||
| } | } | ||||
| func (r *gopathResolver) scan(_ references) ([]*pkg, error) { | |||||
| dupCheck := make(map[string]bool) | |||||
| var result []*pkg | |||||
| var mu sync.Mutex | |||||
| func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { | |||||
| r.init() | |||||
| add := func(root gopathwalk.Root, dir string) { | add := func(root gopathwalk.Root, dir string) { | ||||
| mu.Lock() | |||||
| defer mu.Unlock() | |||||
| if _, dup := dupCheck[dir]; dup { | |||||
| // We assume cached directories have not changed. We can skip them and their | |||||
| // children. | |||||
| if _, ok := r.cache.Load(dir); ok { | |||||
| return | return | ||||
| } | } | ||||
| dupCheck[dir] = true | |||||
| importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) | importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):]) | ||||
| result = append(result, &pkg{ | |||||
| importPathShort: VendorlessPath(importpath), | |||||
| info := directoryPackageInfo{ | |||||
| status: directoryScanned, | |||||
| dir: dir, | |||||
| rootType: root.Type, | |||||
| nonCanonicalImportPath: VendorlessPath(importpath), | |||||
| } | |||||
| r.cache.Store(dir, info) | |||||
| } | |||||
| roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude) | |||||
| gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) | |||||
| var result []*pkg | |||||
| for _, dir := range r.cache.Keys() { | |||||
| info, ok := r.cache.Load(dir) | |||||
| if !ok { | |||||
| continue | |||||
| } | |||||
| if loadNames { | |||||
| var err error | |||||
| info, err = r.cache.CachePackageName(info) | |||||
| if err != nil { | |||||
| continue | |||||
| } | |||||
| } | |||||
| p := &pkg{ | |||||
| importPathShort: info.nonCanonicalImportPath, | |||||
| dir: dir, | dir: dir, | ||||
| }) | |||||
| relevance: 1, | |||||
| packageName: info.packageName, | |||||
| } | |||||
| if info.rootType == gopathwalk.RootGOROOT { | |||||
| p.relevance = 0 | |||||
| } | |||||
| result = append(result, p) | |||||
| } | } | ||||
| gopathwalk.Walk(gopathwalk.SrcDirsRoots(r.env.buildContext()), add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) | |||||
| return result, nil | return result, nil | ||||
| } | } | ||||
| func (r *gopathResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) { | |||||
| return loadExportsFromFiles(ctx, r.env, expectPackage, pkg.dir) | |||||
| func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root { | |||||
| var result []gopathwalk.Root | |||||
| outer: | |||||
| for _, root := range roots { | |||||
| for _, i := range exclude { | |||||
| if i == root.Type { | |||||
| continue outer | |||||
| } | |||||
| } | |||||
| result = append(result, root) | |||||
| } | |||||
| return result | |||||
| } | |||||
| func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { | |||||
| r.init() | |||||
| if info, ok := r.cache.Load(pkg.dir); ok { | |||||
| return r.cache.CacheExports(ctx, r.env, info) | |||||
| } | |||||
| return loadExportsFromFiles(ctx, r.env, pkg.dir) | |||||
| } | } | ||||
| // VendorlessPath returns the devendorized version of the import path ipath. | // VendorlessPath returns the devendorized version of the import path ipath. | ||||
| @@ -1084,13 +1261,13 @@ func VendorlessPath(ipath string) string { | |||||
| return ipath | return ipath | ||||
| } | } | ||||
| func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, expectPackage string, dir string) (map[string]bool, error) { | |||||
| exports := make(map[string]bool) | |||||
| func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) { | |||||
| var exports []string | |||||
| // Look for non-test, buildable .go files which could provide exports. | // Look for non-test, buildable .go files which could provide exports. | ||||
| all, err := ioutil.ReadDir(dir) | all, err := ioutil.ReadDir(dir) | ||||
| if err != nil { | if err != nil { | ||||
| return nil, err | |||||
| return "", nil, err | |||||
| } | } | ||||
| var files []os.FileInfo | var files []os.FileInfo | ||||
| for _, fi := range all { | for _, fi := range all { | ||||
| @@ -1106,47 +1283,42 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, expectPackage st | |||||
| } | } | ||||
| if len(files) == 0 { | if len(files) == 0 { | ||||
| return nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir) | |||||
| return "", nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir) | |||||
| } | } | ||||
| var pkgName string | |||||
| fset := token.NewFileSet() | fset := token.NewFileSet() | ||||
| for _, fi := range files { | for _, fi := range files { | ||||
| select { | select { | ||||
| case <-ctx.Done(): | case <-ctx.Done(): | ||||
| return nil, ctx.Err() | |||||
| return "", nil, ctx.Err() | |||||
| default: | default: | ||||
| } | } | ||||
| fullFile := filepath.Join(dir, fi.Name()) | fullFile := filepath.Join(dir, fi.Name()) | ||||
| f, err := parser.ParseFile(fset, fullFile, nil, 0) | f, err := parser.ParseFile(fset, fullFile, nil, 0) | ||||
| if err != nil { | if err != nil { | ||||
| return nil, fmt.Errorf("parsing %s: %v", fullFile, err) | |||||
| return "", nil, fmt.Errorf("parsing %s: %v", fullFile, err) | |||||
| } | } | ||||
| pkgName := f.Name.Name | |||||
| if pkgName == "documentation" { | |||||
| if f.Name.Name == "documentation" { | |||||
| // Special case from go/build.ImportDir, not | // Special case from go/build.ImportDir, not | ||||
| // handled by MatchFile above. | // handled by MatchFile above. | ||||
| continue | continue | ||||
| } | } | ||||
| if pkgName != expectPackage { | |||||
| return nil, fmt.Errorf("scan of dir %v is not expected package %v (actually %v)", dir, expectPackage, pkgName) | |||||
| } | |||||
| pkgName = f.Name.Name | |||||
| for name := range f.Scope.Objects { | for name := range f.Scope.Objects { | ||||
| if ast.IsExported(name) { | if ast.IsExported(name) { | ||||
| exports[name] = true | |||||
| exports = append(exports, name) | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| if env.Debug { | if env.Debug { | ||||
| exportList := make([]string, 0, len(exports)) | |||||
| for k := range exports { | |||||
| exportList = append(exportList, k) | |||||
| } | |||||
| sort.Strings(exportList) | |||||
| env.Logf("loaded exports in dir %v (package %v): %v", dir, expectPackage, strings.Join(exportList, ", ")) | |||||
| sortedExports := append([]string(nil), exports...) | |||||
| sort.Strings(sortedExports) | |||||
| env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) | |||||
| } | } | ||||
| return exports, nil | |||||
| return pkgName, exports, nil | |||||
| } | } | ||||
| // findImport searches for a package with the given symbols. | // findImport searches for a package with the given symbols. | ||||
| @@ -1221,7 +1393,7 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, | |||||
| if pass.env.Debug { | if pass.env.Debug { | ||||
| pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) | pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) | ||||
| } | } | ||||
| exports, err := pass.env.GetResolver().loadExports(ctx, pkgName, c.pkg) | |||||
| exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg) | |||||
| if err != nil { | if err != nil { | ||||
| if pass.env.Debug { | if pass.env.Debug { | ||||
| pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) | pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) | ||||
| @@ -1230,10 +1402,15 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, | |||||
| return | return | ||||
| } | } | ||||
| exportsMap := make(map[string]bool, len(exports)) | |||||
| for _, sym := range exports { | |||||
| exportsMap[sym] = true | |||||
| } | |||||
| // If it doesn't have the right | // If it doesn't have the right | ||||
| // symbols, send nil to mean no match. | // symbols, send nil to mean no match. | ||||
| for symbol := range symbols { | for symbol := range symbols { | ||||
| if !exports[symbol] { | |||||
| if !exportsMap[symbol] { | |||||
| resc <- nil | resc <- nil | ||||
| return | return | ||||
| } | } | ||||
| @@ -1253,6 +1430,17 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, | |||||
| return nil, nil | return nil, nil | ||||
| } | } | ||||
| func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) { | |||||
| pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if expectPkg != pkgName { | |||||
| return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg) | |||||
| } | |||||
| return exports, err | |||||
| } | |||||
| // pkgIsCandidate reports whether pkg is a candidate for satisfying the | // pkgIsCandidate reports whether pkg is a candidate for satisfying the | ||||
| // finding which package pkgIdent in the file named by filename is trying | // finding which package pkgIdent in the file named by filename is trying | ||||
| // to refer to. | // to refer to. | ||||
| @@ -1383,3 +1571,11 @@ type visitFn func(node ast.Node) ast.Visitor | |||||
| func (fn visitFn) Visit(node ast.Node) ast.Visitor { | func (fn visitFn) Visit(node ast.Node) ast.Visitor { | ||||
| return fn(node) | return fn(node) | ||||
| } | } | ||||
| func copyExports(pkg []string) map[string]bool { | |||||
| m := make(map[string]bool, len(pkg)) | |||||
| for _, v := range pkg { | |||||
| m[v] = true | |||||
| } | |||||
| return m | |||||
| } | |||||
| @@ -83,35 +83,56 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, | |||||
| return getFixes(fileSet, file, filename, opt.Env) | return getFixes(fileSet, file, filename, opt.Env) | ||||
| } | } | ||||
| // ApplyFix will apply all of the fixes to the file and format it. | |||||
| func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) { | |||||
| // ApplyFixes applies all of the fixes to the file and formats it. extraMode | |||||
| // is added in when parsing the file. | |||||
| func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { | |||||
| src, opt, err = initialize(filename, src, opt) | src, opt, err = initialize(filename, src, opt) | ||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| // Don't use parse() -- we don't care about fragments or statement lists | |||||
| // here, and we need to work with unparseable files. | |||||
| fileSet := token.NewFileSet() | fileSet := token.NewFileSet() | ||||
| file, adjust, err := parse(fileSet, filename, src, opt) | |||||
| if err != nil { | |||||
| parserMode := parser.Mode(0) | |||||
| if opt.Comments { | |||||
| parserMode |= parser.ParseComments | |||||
| } | |||||
| if opt.AllErrors { | |||||
| parserMode |= parser.AllErrors | |||||
| } | |||||
| parserMode |= extraMode | |||||
| file, err := parser.ParseFile(fileSet, filename, src, parserMode) | |||||
| if file == nil { | |||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| // Apply the fixes to the file. | // Apply the fixes to the file. | ||||
| apply(fileSet, file, fixes) | apply(fileSet, file, fixes) | ||||
| return formatFile(fileSet, file, src, adjust, opt) | |||||
| return formatFile(fileSet, file, src, nil, opt) | |||||
| } | } | ||||
| // GetAllCandidates gets all of the standard library candidate packages to import in | // GetAllCandidates gets all of the standard library candidate packages to import in | ||||
| // sorted order on import path. | // sorted order on import path. | ||||
| func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) { | func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) { | ||||
| _, opt, err = initialize(filename, []byte{}, opt) | |||||
| _, opt, err = initialize(filename, nil, opt) | |||||
| if err != nil { | if err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| return getAllCandidates(filename, opt.Env) | return getAllCandidates(filename, opt.Env) | ||||
| } | } | ||||
| // GetPackageExports returns all known packages with name pkg and their exports. | |||||
| func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) { | |||||
| _, opt, err = initialize(filename, nil, opt) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return getPackageExports(pkg, filename, opt.Env) | |||||
| } | |||||
| // initialize sets the values for opt and src. | // initialize sets the values for opt and src. | ||||
| // If they are provided, they are not changed. Otherwise opt is set to the | // If they are provided, they are not changed. Otherwise opt is set to the | ||||
| // default values and src is read from the file system. | // default values and src is read from the file system. | ||||
| @@ -14,10 +14,10 @@ import ( | |||||
| "strconv" | "strconv" | ||||
| "strings" | "strings" | ||||
| "sync" | "sync" | ||||
| "time" | |||||
| "golang.org/x/tools/internal/gopathwalk" | "golang.org/x/tools/internal/gopathwalk" | ||||
| "golang.org/x/tools/internal/module" | "golang.org/x/tools/internal/module" | ||||
| "golang.org/x/tools/internal/semver" | |||||
| ) | ) | ||||
| // ModuleResolver implements resolver for modules using the go command as little | // ModuleResolver implements resolver for modules using the go command as little | ||||
| @@ -25,38 +25,81 @@ import ( | |||||
| type ModuleResolver struct { | type ModuleResolver struct { | ||||
| env *ProcessEnv | env *ProcessEnv | ||||
| moduleCacheDir string | moduleCacheDir string | ||||
| dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. | |||||
| Initialized bool | Initialized bool | ||||
| Main *ModuleJSON | Main *ModuleJSON | ||||
| ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... | ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... | ||||
| ModsByDir []*ModuleJSON // ...or Dir. | ModsByDir []*ModuleJSON // ...or Dir. | ||||
| // moduleCacheInfo stores information about the module cache. | |||||
| moduleCacheInfo *moduleCacheInfo | |||||
| // moduleCacheCache stores information about the module cache. | |||||
| moduleCacheCache *dirInfoCache | |||||
| otherCache *dirInfoCache | |||||
| } | } | ||||
| type ModuleJSON struct { | type ModuleJSON struct { | ||||
| Path string // module path | |||||
| Version string // module version | |||||
| Versions []string // available module versions (with -versions) | |||||
| Replace *ModuleJSON // replaced by this module | |||||
| Time *time.Time // time version was created | |||||
| Update *ModuleJSON // available update, if any (with -u) | |||||
| Main bool // is this the main module? | |||||
| Indirect bool // is this module only an indirect dependency of main module? | |||||
| Dir string // directory holding files for this module, if any | |||||
| GoMod string // path to go.mod file for this module, if any | |||||
| Error *ModuleErrorJSON // error loading module | |||||
| } | |||||
| type ModuleErrorJSON struct { | |||||
| Err string // the error itself | |||||
| Path string // module path | |||||
| Replace *ModuleJSON // replaced by this module | |||||
| Main bool // is this the main module? | |||||
| Dir string // directory holding files for this module, if any | |||||
| GoMod string // path to go.mod file for this module, if any | |||||
| GoVersion string // go version used in module | |||||
| } | } | ||||
| func (r *ModuleResolver) init() error { | func (r *ModuleResolver) init() error { | ||||
| if r.Initialized { | if r.Initialized { | ||||
| return nil | return nil | ||||
| } | } | ||||
| mainMod, vendorEnabled, err := vendorEnabled(r.env) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if mainMod != nil && vendorEnabled { | |||||
| // Vendor mode is on, so all the non-Main modules are irrelevant, | |||||
| // and we need to search /vendor for everything. | |||||
| r.Main = mainMod | |||||
| r.dummyVendorMod = &ModuleJSON{ | |||||
| Path: "", | |||||
| Dir: filepath.Join(mainMod.Dir, "vendor"), | |||||
| } | |||||
| r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} | |||||
| r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} | |||||
| } else { | |||||
| // Vendor mode is off, so run go list -m ... to find everything. | |||||
| r.initAllMods() | |||||
| } | |||||
| r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") | |||||
| sort.Slice(r.ModsByModPath, func(i, j int) bool { | |||||
| count := func(x int) int { | |||||
| return strings.Count(r.ModsByModPath[x].Path, "/") | |||||
| } | |||||
| return count(j) < count(i) // descending order | |||||
| }) | |||||
| sort.Slice(r.ModsByDir, func(i, j int) bool { | |||||
| count := func(x int) int { | |||||
| return strings.Count(r.ModsByDir[x].Dir, "/") | |||||
| } | |||||
| return count(j) < count(i) // descending order | |||||
| }) | |||||
| if r.moduleCacheCache == nil { | |||||
| r.moduleCacheCache = &dirInfoCache{ | |||||
| dirs: map[string]*directoryPackageInfo{}, | |||||
| } | |||||
| } | |||||
| if r.otherCache == nil { | |||||
| r.otherCache = &dirInfoCache{ | |||||
| dirs: map[string]*directoryPackageInfo{}, | |||||
| } | |||||
| } | |||||
| r.Initialized = true | |||||
| return nil | |||||
| } | |||||
| func (r *ModuleResolver) initAllMods() error { | |||||
| stdout, err := r.env.invokeGo("list", "-m", "-json", "...") | stdout, err := r.env.invokeGo("list", "-m", "-json", "...") | ||||
| if err != nil { | if err != nil { | ||||
| return err | return err | ||||
| @@ -79,28 +122,21 @@ func (r *ModuleResolver) init() error { | |||||
| r.Main = mod | r.Main = mod | ||||
| } | } | ||||
| } | } | ||||
| return nil | |||||
| } | |||||
| sort.Slice(r.ModsByModPath, func(i, j int) bool { | |||||
| count := func(x int) int { | |||||
| return strings.Count(r.ModsByModPath[x].Path, "/") | |||||
| } | |||||
| return count(j) < count(i) // descending order | |||||
| }) | |||||
| sort.Slice(r.ModsByDir, func(i, j int) bool { | |||||
| count := func(x int) int { | |||||
| return strings.Count(r.ModsByDir[x].Dir, "/") | |||||
| } | |||||
| return count(j) < count(i) // descending order | |||||
| }) | |||||
| if r.moduleCacheInfo == nil { | |||||
| r.moduleCacheInfo = &moduleCacheInfo{ | |||||
| modCacheDirInfo: make(map[string]*directoryPackageInfo), | |||||
| } | |||||
| func (r *ModuleResolver) ClearForNewScan() { | |||||
| r.otherCache = &dirInfoCache{ | |||||
| dirs: map[string]*directoryPackageInfo{}, | |||||
| } | } | ||||
| } | |||||
| r.Initialized = true | |||||
| return nil | |||||
| func (r *ModuleResolver) ClearForNewMod() { | |||||
| env := r.env | |||||
| *r = ModuleResolver{ | |||||
| env: env, | |||||
| } | |||||
| r.init() | |||||
| } | } | ||||
| // findPackage returns the module and directory that contains the package at | // findPackage returns the module and directory that contains the package at | ||||
| @@ -118,22 +154,31 @@ func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { | |||||
| continue | continue | ||||
| } | } | ||||
| if info, ok := r.moduleCacheInfo.Load(pkgDir); ok { | |||||
| if packageScanned, err := info.reachedStatus(directoryScanned); packageScanned { | |||||
| if info, ok := r.cacheLoad(pkgDir); ok { | |||||
| if loaded, err := info.reachedStatus(nameLoaded); loaded { | |||||
| if err != nil { | if err != nil { | ||||
| // There was some error with scanning this directory. | |||||
| // It does not contain a valid package. | |||||
| continue | |||||
| continue // No package in this dir. | |||||
| } | } | ||||
| return m, pkgDir | return m, pkgDir | ||||
| } | } | ||||
| if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil { | |||||
| continue // Dir is unreadable, etc. | |||||
| } | |||||
| // This is slightly wrong: a directory doesn't have to have an | |||||
| // importable package to count as a package for package-to-module | |||||
| // resolution. package main or _test files should count but | |||||
| // don't. | |||||
| // TODO(heschi): fix this. | |||||
| if _, err := r.cachePackageName(info); err == nil { | |||||
| return m, pkgDir | |||||
| } | |||||
| } | } | ||||
| // Not cached. Read the filesystem. | |||||
| pkgFiles, err := ioutil.ReadDir(pkgDir) | pkgFiles, err := ioutil.ReadDir(pkgDir) | ||||
| if err != nil { | if err != nil { | ||||
| continue | continue | ||||
| } | } | ||||
| // A module only contains a package if it has buildable go | // A module only contains a package if it has buildable go | ||||
| // files in that directory. If not, it could be provided by an | // files in that directory. If not, it could be provided by an | ||||
| // outer module. See #29736. | // outer module. See #29736. | ||||
| @@ -146,6 +191,40 @@ func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { | |||||
| return nil, "" | return nil, "" | ||||
| } | } | ||||
| func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) { | |||||
| if info, ok := r.moduleCacheCache.Load(dir); ok { | |||||
| return info, ok | |||||
| } | |||||
| return r.otherCache.Load(dir) | |||||
| } | |||||
| func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { | |||||
| if info.rootType == gopathwalk.RootModuleCache { | |||||
| r.moduleCacheCache.Store(info.dir, info) | |||||
| } else { | |||||
| r.otherCache.Store(info.dir, info) | |||||
| } | |||||
| } | |||||
| func (r *ModuleResolver) cacheKeys() []string { | |||||
| return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) | |||||
| } | |||||
| // cachePackageName caches the package name for a dir already in the cache. | |||||
| func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { | |||||
| if info.rootType == gopathwalk.RootModuleCache { | |||||
| return r.moduleCacheCache.CachePackageName(info) | |||||
| } | |||||
| return r.otherCache.CachePackageName(info) | |||||
| } | |||||
| func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { | |||||
| if info.rootType == gopathwalk.RootModuleCache { | |||||
| return r.moduleCacheCache.CacheExports(ctx, env, info) | |||||
| } | |||||
| return r.otherCache.CacheExports(ctx, env, info) | |||||
| } | |||||
| // findModuleByDir returns the module that contains dir, or nil if no such | // findModuleByDir returns the module that contains dir, or nil if no such | ||||
| // module is in scope. | // module is in scope. | ||||
| func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { | func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { | ||||
| @@ -184,28 +263,45 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool { | |||||
| // so it cannot be a nested module. | // so it cannot be a nested module. | ||||
| return false | return false | ||||
| } | } | ||||
| mf := r.findModFile(dir) | |||||
| if mf == "" { | |||||
| if mod != nil && mod == r.dummyVendorMod { | |||||
| // The /vendor pseudomodule is flattened and doesn't actually count. | |||||
| return false | return false | ||||
| } | } | ||||
| return filepath.Dir(mf) != mod.Dir | |||||
| modDir, _ := r.modInfo(dir) | |||||
| if modDir == "" { | |||||
| return false | |||||
| } | |||||
| return modDir != mod.Dir | |||||
| } | } | ||||
| func (r *ModuleResolver) findModFile(dir string) string { | |||||
| func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { | |||||
| readModName := func(modFile string) string { | |||||
| modBytes, err := ioutil.ReadFile(modFile) | |||||
| if err != nil { | |||||
| return "" | |||||
| } | |||||
| return modulePath(modBytes) | |||||
| } | |||||
| if r.dirInModuleCache(dir) { | if r.dirInModuleCache(dir) { | ||||
| matches := modCacheRegexp.FindStringSubmatch(dir) | matches := modCacheRegexp.FindStringSubmatch(dir) | ||||
| index := strings.Index(dir, matches[1]+"@"+matches[2]) | index := strings.Index(dir, matches[1]+"@"+matches[2]) | ||||
| return filepath.Join(dir[:index], matches[1]+"@"+matches[2], "go.mod") | |||||
| modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) | |||||
| return modDir, readModName(filepath.Join(modDir, "go.mod")) | |||||
| } | } | ||||
| for { | for { | ||||
| if info, ok := r.cacheLoad(dir); ok { | |||||
| return info.moduleDir, info.moduleName | |||||
| } | |||||
| f := filepath.Join(dir, "go.mod") | f := filepath.Join(dir, "go.mod") | ||||
| info, err := os.Stat(f) | info, err := os.Stat(f) | ||||
| if err == nil && !info.IsDir() { | if err == nil && !info.IsDir() { | ||||
| return f | |||||
| return dir, readModName(f) | |||||
| } | } | ||||
| d := filepath.Dir(dir) | d := filepath.Dir(dir) | ||||
| if len(d) >= len(dir) { | if len(d) >= len(dir) { | ||||
| return "" // reached top of file system, no go.mod | |||||
| return "", "" // reached top of file system, no go.mod | |||||
| } | } | ||||
| dir = d | dir = d | ||||
| } | } | ||||
| @@ -237,7 +333,7 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( | |||||
| return names, nil | return names, nil | ||||
| } | } | ||||
| func (r *ModuleResolver) scan(_ references) ([]*pkg, error) { | |||||
| func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { | |||||
| if err := r.init(); err != nil { | if err := r.init(); err != nil { | ||||
| return nil, err | return nil, err | ||||
| } | } | ||||
| @@ -249,35 +345,30 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) { | |||||
| if r.Main != nil { | if r.Main != nil { | ||||
| roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule}) | roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule}) | ||||
| } | } | ||||
| if r.moduleCacheDir == "" { | |||||
| r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") | |||||
| } | |||||
| roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) | |||||
| // Walk replace targets, just in case they're not in any of the above. | |||||
| for _, mod := range r.ModsByModPath { | |||||
| if mod.Replace != nil { | |||||
| roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) | |||||
| if r.dummyVendorMod != nil { | |||||
| roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) | |||||
| } else { | |||||
| roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) | |||||
| // Walk replace targets, just in case they're not in any of the above. | |||||
| for _, mod := range r.ModsByModPath { | |||||
| if mod.Replace != nil { | |||||
| roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| roots = filterRoots(roots, exclude) | |||||
| var result []*pkg | var result []*pkg | ||||
| dupCheck := make(map[string]bool) | |||||
| var mu sync.Mutex | var mu sync.Mutex | ||||
| // Packages in the module cache are immutable. If we have | |||||
| // already seen this package on a previous scan of the module | |||||
| // cache, return that result. | |||||
| // We assume cached directories have not changed. We can skip them and their | |||||
| // children. | |||||
| skip := func(root gopathwalk.Root, dir string) bool { | skip := func(root gopathwalk.Root, dir string) bool { | ||||
| mu.Lock() | mu.Lock() | ||||
| defer mu.Unlock() | defer mu.Unlock() | ||||
| // If we have already processed this directory on this walk, skip it. | |||||
| if _, dup := dupCheck[dir]; dup { | |||||
| return true | |||||
| } | |||||
| // If we have saved this directory information, skip it. | |||||
| info, ok := r.moduleCacheInfo.Load(dir) | |||||
| info, ok := r.cacheLoad(dir) | |||||
| if !ok { | if !ok { | ||||
| return false | return false | ||||
| } | } | ||||
| @@ -288,44 +379,19 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) { | |||||
| return packageScanned | return packageScanned | ||||
| } | } | ||||
| // Add anything new to the cache. We'll process everything in it below. | |||||
| add := func(root gopathwalk.Root, dir string) { | add := func(root gopathwalk.Root, dir string) { | ||||
| mu.Lock() | mu.Lock() | ||||
| defer mu.Unlock() | defer mu.Unlock() | ||||
| if _, dup := dupCheck[dir]; dup { | |||||
| return | |||||
| } | |||||
| info, err := r.scanDirForPackage(root, dir) | |||||
| if err != nil { | |||||
| return | |||||
| } | |||||
| if root.Type == gopathwalk.RootModuleCache { | |||||
| // Save this package information in the cache and return. | |||||
| // Packages from the module cache are added after Walk. | |||||
| r.moduleCacheInfo.Store(dir, info) | |||||
| return | |||||
| } | |||||
| // Skip this package if there was an error loading package info. | |||||
| if info.err != nil { | |||||
| return | |||||
| } | |||||
| // The rest of this function canonicalizes the packages using the results | |||||
| // of initializing the resolver from 'go list -m'. | |||||
| res, err := r.canonicalize(info.nonCanonicalImportPath, info.dir, info.needsReplace) | |||||
| if err != nil { | |||||
| return | |||||
| } | |||||
| result = append(result, res) | |||||
| r.cacheStore(r.scanDirForPackage(root, dir)) | |||||
| } | } | ||||
| gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) | gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) | ||||
| // Add the packages from the modules in the mod cache that were skipped. | |||||
| for _, dir := range r.moduleCacheInfo.Keys() { | |||||
| info, ok := r.moduleCacheInfo.Load(dir) | |||||
| // Everything we already had, and everything new, is now in the cache. | |||||
| for _, dir := range r.cacheKeys() { | |||||
| info, ok := r.cacheLoad(dir) | |||||
| if !ok { | if !ok { | ||||
| continue | continue | ||||
| } | } | ||||
| @@ -335,7 +401,15 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) { | |||||
| continue | continue | ||||
| } | } | ||||
| res, err := r.canonicalize(info.nonCanonicalImportPath, info.dir, info.needsReplace) | |||||
| // If we want package names, make sure the cache has them. | |||||
| if loadNames { | |||||
| var err error | |||||
| if info, err = r.cachePackageName(info); err != nil { | |||||
| continue | |||||
| } | |||||
| } | |||||
| res, err := r.canonicalize(info) | |||||
| if err != nil { | if err != nil { | ||||
| continue | continue | ||||
| } | } | ||||
| @@ -347,52 +421,70 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) { | |||||
| // canonicalize gets the result of canonicalizing the packages using the results | // canonicalize gets the result of canonicalizing the packages using the results | ||||
| // of initializing the resolver from 'go list -m'. | // of initializing the resolver from 'go list -m'. | ||||
| func (r *ModuleResolver) canonicalize(importPath, dir string, needsReplace bool) (res *pkg, err error) { | |||||
| func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { | |||||
| // Packages in GOROOT are already canonical, regardless of the std/cmd modules. | |||||
| if info.rootType == gopathwalk.RootGOROOT { | |||||
| return &pkg{ | |||||
| importPathShort: info.nonCanonicalImportPath, | |||||
| dir: info.dir, | |||||
| packageName: path.Base(info.nonCanonicalImportPath), | |||||
| relevance: 0, | |||||
| }, nil | |||||
| } | |||||
| importPath := info.nonCanonicalImportPath | |||||
| relevance := 2 | |||||
| // Check if the directory is underneath a module that's in scope. | // Check if the directory is underneath a module that's in scope. | ||||
| if mod := r.findModuleByDir(dir); mod != nil { | |||||
| if mod := r.findModuleByDir(info.dir); mod != nil { | |||||
| relevance = 1 | |||||
| // It is. If dir is the target of a replace directive, | // It is. If dir is the target of a replace directive, | ||||
| // our guessed import path is wrong. Use the real one. | // our guessed import path is wrong. Use the real one. | ||||
| if mod.Dir == dir { | |||||
| if mod.Dir == info.dir { | |||||
| importPath = mod.Path | importPath = mod.Path | ||||
| } else { | } else { | ||||
| dirInMod := dir[len(mod.Dir)+len("/"):] | |||||
| dirInMod := info.dir[len(mod.Dir)+len("/"):] | |||||
| importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) | importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) | ||||
| } | } | ||||
| } else if needsReplace { | |||||
| return nil, fmt.Errorf("needed this package to be in scope: %s", dir) | |||||
| } else if info.needsReplace { | |||||
| return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) | |||||
| } | } | ||||
| res := &pkg{ | |||||
| importPathShort: importPath, | |||||
| dir: info.dir, | |||||
| packageName: info.packageName, // may not be populated if the caller didn't ask for it | |||||
| relevance: relevance, | |||||
| } | |||||
| // We may have discovered a package that has a different version | // We may have discovered a package that has a different version | ||||
| // in scope already. Canonicalize to that one if possible. | // in scope already. Canonicalize to that one if possible. | ||||
| if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { | if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" { | ||||
| dir = canonicalDir | |||||
| res.dir = canonicalDir | |||||
| } | } | ||||
| return &pkg{ | |||||
| importPathShort: VendorlessPath(importPath), | |||||
| dir: dir, | |||||
| }, nil | |||||
| return res, nil | |||||
| } | } | ||||
| func (r *ModuleResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) { | |||||
| func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { | |||||
| if err := r.init(); err != nil { | if err := r.init(); err != nil { | ||||
| return nil, err | |||||
| return "", nil, err | |||||
| } | } | ||||
| return loadExportsFromFiles(ctx, r.env, expectPackage, pkg.dir) | |||||
| if info, ok := r.cacheLoad(pkg.dir); ok { | |||||
| return r.cacheExports(ctx, r.env, info) | |||||
| } | |||||
| return loadExportsFromFiles(ctx, r.env, pkg.dir) | |||||
| } | } | ||||
| func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) (directoryPackageInfo, error) { | |||||
| func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { | |||||
| subdir := "" | subdir := "" | ||||
| if dir != root.Path { | if dir != root.Path { | ||||
| subdir = dir[len(root.Path)+len("/"):] | subdir = dir[len(root.Path)+len("/"):] | ||||
| } | } | ||||
| importPath := filepath.ToSlash(subdir) | importPath := filepath.ToSlash(subdir) | ||||
| if strings.HasPrefix(importPath, "vendor/") { | if strings.HasPrefix(importPath, "vendor/") { | ||||
| // Ignore vendor dirs. If -mod=vendor is on, then things | |||||
| // should mostly just work, but when it's not vendor/ | |||||
| // is a mess. There's no easy way to tell if it's on. | |||||
| // We can still find things in the mod cache and | |||||
| // map them into /vendor when -mod=vendor is on. | |||||
| return directoryPackageInfo{}, fmt.Errorf("vendor directory") | |||||
| // Only enter vendor directories if they're explicitly requested as a root. | |||||
| return directoryPackageInfo{ | |||||
| status: directoryScanned, | |||||
| err: fmt.Errorf("unwanted vendor directory"), | |||||
| } | |||||
| } | } | ||||
| switch root.Type { | switch root.Type { | ||||
| case gopathwalk.RootCurrentModule: | case gopathwalk.RootCurrentModule: | ||||
| @@ -403,7 +495,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) (di | |||||
| return directoryPackageInfo{ | return directoryPackageInfo{ | ||||
| status: directoryScanned, | status: directoryScanned, | ||||
| err: fmt.Errorf("invalid module cache path: %v", subdir), | err: fmt.Errorf("invalid module cache path: %v", subdir), | ||||
| }, nil | |||||
| } | |||||
| } | } | ||||
| modPath, err := module.DecodePath(filepath.ToSlash(matches[1])) | modPath, err := module.DecodePath(filepath.ToSlash(matches[1])) | ||||
| if err != nil { | if err != nil { | ||||
| @@ -413,35 +505,34 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) (di | |||||
| return directoryPackageInfo{ | return directoryPackageInfo{ | ||||
| status: directoryScanned, | status: directoryScanned, | ||||
| err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), | err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), | ||||
| }, nil | |||||
| } | |||||
| } | } | ||||
| importPath = path.Join(modPath, filepath.ToSlash(matches[3])) | importPath = path.Join(modPath, filepath.ToSlash(matches[3])) | ||||
| case gopathwalk.RootGOROOT: | |||||
| importPath = subdir | |||||
| } | } | ||||
| modDir, modName := r.modInfo(dir) | |||||
| result := directoryPackageInfo{ | result := directoryPackageInfo{ | ||||
| status: directoryScanned, | status: directoryScanned, | ||||
| dir: dir, | dir: dir, | ||||
| rootType: root.Type, | |||||
| nonCanonicalImportPath: importPath, | nonCanonicalImportPath: importPath, | ||||
| needsReplace: false, | needsReplace: false, | ||||
| moduleDir: modDir, | |||||
| moduleName: modName, | |||||
| } | } | ||||
| if root.Type == gopathwalk.RootGOROOT { | if root.Type == gopathwalk.RootGOROOT { | ||||
| // stdlib packages are always in scope, despite the confusing go.mod | // stdlib packages are always in scope, despite the confusing go.mod | ||||
| return result, nil | |||||
| return result | |||||
| } | } | ||||
| // Check that this package is not obviously impossible to import. | // Check that this package is not obviously impossible to import. | ||||
| modFile := r.findModFile(dir) | |||||
| modBytes, err := ioutil.ReadFile(modFile) | |||||
| if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) { | |||||
| if !strings.HasPrefix(importPath, modName) { | |||||
| // The module's declared path does not match | // The module's declared path does not match | ||||
| // its expected path. It probably needs a | // its expected path. It probably needs a | ||||
| // replace directive we don't have. | // replace directive we don't have. | ||||
| result.needsReplace = true | result.needsReplace = true | ||||
| } | } | ||||
| return result, nil | |||||
| return result | |||||
| } | } | ||||
| // modCacheRegexp splits a path in a module cache into module, module version, and package. | // modCacheRegexp splits a path in a module cache into module, module version, and package. | ||||
| @@ -490,3 +581,63 @@ func modulePath(mod []byte) string { | |||||
| } | } | ||||
| return "" // missing module path | return "" // missing module path | ||||
| } | } | ||||
| var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) | |||||
| // vendorEnabled indicates if vendoring is enabled. | |||||
| // Inspired by setDefaultBuildMod in modload/init.go | |||||
| func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) { | |||||
| mainMod, go114, err := getMainModuleAnd114(env) | |||||
| if err != nil { | |||||
| return nil, false, err | |||||
| } | |||||
| matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS) | |||||
| var modFlag string | |||||
| if len(matches) != 0 { | |||||
| modFlag = matches[1] | |||||
| } | |||||
| if modFlag != "" { | |||||
| // Don't override an explicit '-mod=' argument. | |||||
| return mainMod, modFlag == "vendor", nil | |||||
| } | |||||
| if mainMod == nil || !go114 { | |||||
| return mainMod, false, nil | |||||
| } | |||||
| // Check 1.14's automatic vendor mode. | |||||
| if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { | |||||
| if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { | |||||
| // The Go version is at least 1.14, and a vendor directory exists. | |||||
| // Set -mod=vendor by default. | |||||
| return mainMod, true, nil | |||||
| } | |||||
| } | |||||
| return mainMod, false, nil | |||||
| } | |||||
| // getMainModuleAnd114 gets the main module's information and whether the | |||||
| // go command in use is 1.14+. This is the information needed to figure out | |||||
| // if vendoring should be enabled. | |||||
| func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) { | |||||
| const format = `{{.Path}} | |||||
| {{.Dir}} | |||||
| {{.GoMod}} | |||||
| {{.GoVersion}} | |||||
| {{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} | |||||
| ` | |||||
| stdout, err := env.invokeGo("list", "-m", "-f", format) | |||||
| if err != nil { | |||||
| return nil, false, nil | |||||
| } | |||||
| lines := strings.Split(stdout.String(), "\n") | |||||
| if len(lines) < 5 { | |||||
| return nil, false, fmt.Errorf("unexpected stdout: %q", stdout) | |||||
| } | |||||
| mod := &ModuleJSON{ | |||||
| Path: lines[0], | |||||
| Dir: lines[1], | |||||
| GoMod: lines[2], | |||||
| GoVersion: lines[3], | |||||
| Main: true, | |||||
| } | |||||
| return mod, lines[4] == "go1.14", nil | |||||
| } | |||||
| @@ -1,12 +1,13 @@ | |||||
| package imports | package imports | ||||
| import ( | import ( | ||||
| "context" | |||||
| "fmt" | |||||
| "sync" | "sync" | ||||
| "golang.org/x/tools/internal/gopathwalk" | |||||
| ) | ) | ||||
| // ModuleResolver implements Resolver for modules using the go command as little | |||||
| // as feasible. | |||||
| // | |||||
| // To find packages to import, the resolver needs to know about all of the | // To find packages to import, the resolver needs to know about all of the | ||||
| // the packages that could be imported. This includes packages that are | // the packages that could be imported. This includes packages that are | ||||
| // already in modules that are in (1) the current module, (2) replace targets, | // already in modules that are in (1) the current module, (2) replace targets, | ||||
| @@ -30,6 +31,8 @@ type directoryPackageStatus int | |||||
| const ( | const ( | ||||
| _ directoryPackageStatus = iota | _ directoryPackageStatus = iota | ||||
| directoryScanned | directoryScanned | ||||
| nameLoaded | |||||
| exportsLoaded | |||||
| ) | ) | ||||
| type directoryPackageInfo struct { | type directoryPackageInfo struct { | ||||
| @@ -38,17 +41,30 @@ type directoryPackageInfo struct { | |||||
| // err is non-nil when there was an error trying to reach status. | // err is non-nil when there was an error trying to reach status. | ||||
| err error | err error | ||||
| // Set when status > directoryScanned. | |||||
| // Set when status >= directoryScanned. | |||||
| // dir is the absolute directory of this package. | // dir is the absolute directory of this package. | ||||
| dir string | |||||
| // nonCanonicalImportPath is the expected import path for this package. | |||||
| // This may not be an import path that can be used to import this package. | |||||
| dir string | |||||
| rootType gopathwalk.RootType | |||||
| // nonCanonicalImportPath is the package's expected import path. It may | |||||
| // not actually be importable at that path. | |||||
| nonCanonicalImportPath string | nonCanonicalImportPath string | ||||
| // needsReplace is true if the nonCanonicalImportPath does not match the | // needsReplace is true if the nonCanonicalImportPath does not match the | ||||
| // the modules declared path, making it impossible to import without a | |||||
| // module's declared path, making it impossible to import without a | |||||
| // replace directive. | // replace directive. | ||||
| needsReplace bool | needsReplace bool | ||||
| // Module-related information. | |||||
| moduleDir string // The directory that is the module root of this dir. | |||||
| moduleName string // The module name that contains this dir. | |||||
| // Set when status >= nameLoaded. | |||||
| packageName string // the package name, as declared in the source. | |||||
| // Set when status >= exportsLoaded. | |||||
| exports []string | |||||
| } | } | ||||
| // reachedStatus returns true when info has a status at least target and any error associated with | // reachedStatus returns true when info has a status at least target and any error associated with | ||||
| @@ -63,8 +79,8 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( | |||||
| return true, nil | return true, nil | ||||
| } | } | ||||
| // moduleCacheInfo is a concurrency safe map for storing information about | |||||
| // the directories in the module cache. | |||||
| // dirInfoCache is a concurrency safe map for storing information about | |||||
| // directories that may contain packages. | |||||
| // | // | ||||
| // The information in this cache is built incrementally. Entries are initialized in scan. | // The information in this cache is built incrementally. Entries are initialized in scan. | ||||
| // No new keys should be added in any other functions, as all directories containing | // No new keys should be added in any other functions, as all directories containing | ||||
| @@ -73,37 +89,30 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( | |||||
| // Other functions, including loadExports and findPackage, may update entries in this cache | // Other functions, including loadExports and findPackage, may update entries in this cache | ||||
| // as they discover new things about the directory. | // as they discover new things about the directory. | ||||
| // | // | ||||
| // We do not need to protect the data in the cache for multiple writes, because it only stores | |||||
| // module cache directories, which do not change. If two competing stores take place, there will be | |||||
| // one store that wins. Although this could result in a loss of information it will not be incorrect | |||||
| // and may just result in recomputing the same result later. | |||||
| // The information in the cache is not expected to change for the cache's | |||||
| // lifetime, so there is no protection against competing writes. Users should | |||||
| // take care not to hold the cache across changes to the underlying files. | |||||
| // | // | ||||
| // TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) | // TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) | ||||
| type moduleCacheInfo struct { | |||||
| type dirInfoCache struct { | |||||
| mu sync.Mutex | mu sync.Mutex | ||||
| // modCacheDirInfo stores information about packages in | |||||
| // module cache directories. Keyed by absolute directory. | |||||
| modCacheDirInfo map[string]*directoryPackageInfo | |||||
| // dirs stores information about packages in directories, keyed by absolute path. | |||||
| dirs map[string]*directoryPackageInfo | |||||
| } | } | ||||
| // Store stores the package info for dir. | // Store stores the package info for dir. | ||||
| func (d *moduleCacheInfo) Store(dir string, info directoryPackageInfo) { | |||||
| func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { | |||||
| d.mu.Lock() | d.mu.Lock() | ||||
| defer d.mu.Unlock() | defer d.mu.Unlock() | ||||
| d.modCacheDirInfo[dir] = &directoryPackageInfo{ | |||||
| status: info.status, | |||||
| err: info.err, | |||||
| dir: info.dir, | |||||
| nonCanonicalImportPath: info.nonCanonicalImportPath, | |||||
| needsReplace: info.needsReplace, | |||||
| } | |||||
| stored := info // defensive copy | |||||
| d.dirs[dir] = &stored | |||||
| } | } | ||||
| // Load returns a copy of the directoryPackageInfo for absolute directory dir. | // Load returns a copy of the directoryPackageInfo for absolute directory dir. | ||||
| func (d *moduleCacheInfo) Load(dir string) (directoryPackageInfo, bool) { | |||||
| func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { | |||||
| d.mu.Lock() | d.mu.Lock() | ||||
| defer d.mu.Unlock() | defer d.mu.Unlock() | ||||
| info, ok := d.modCacheDirInfo[dir] | |||||
| info, ok := d.dirs[dir] | |||||
| if !ok { | if !ok { | ||||
| return directoryPackageInfo{}, false | return directoryPackageInfo{}, false | ||||
| } | } | ||||
| @@ -111,11 +120,46 @@ func (d *moduleCacheInfo) Load(dir string) (directoryPackageInfo, bool) { | |||||
| } | } | ||||
| // Keys returns the keys currently present in d. | // Keys returns the keys currently present in d. | ||||
| func (d *moduleCacheInfo) Keys() (keys []string) { | |||||
| func (d *dirInfoCache) Keys() (keys []string) { | |||||
| d.mu.Lock() | d.mu.Lock() | ||||
| defer d.mu.Unlock() | defer d.mu.Unlock() | ||||
| for key := range d.modCacheDirInfo { | |||||
| for key := range d.dirs { | |||||
| keys = append(keys, key) | keys = append(keys, key) | ||||
| } | } | ||||
| return keys | return keys | ||||
| } | } | ||||
| func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { | |||||
| if loaded, err := info.reachedStatus(nameLoaded); loaded { | |||||
| return info, err | |||||
| } | |||||
| if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { | |||||
| return info, fmt.Errorf("cannot read package name, scan error: %v", err) | |||||
| } | |||||
| info.packageName, info.err = packageDirToName(info.dir) | |||||
| info.status = nameLoaded | |||||
| d.Store(info.dir, info) | |||||
| return info, info.err | |||||
| } | |||||
| func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { | |||||
| if reached, _ := info.reachedStatus(exportsLoaded); reached { | |||||
| return info.packageName, info.exports, info.err | |||||
| } | |||||
| if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { | |||||
| return "", nil, err | |||||
| } | |||||
| info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir) | |||||
| if info.err == context.Canceled { | |||||
| return info.packageName, info.exports, info.err | |||||
| } | |||||
| // The cache structure wants things to proceed linearly. We can skip a | |||||
| // step here, but only if we succeed. | |||||
| if info.status == nameLoaded || info.err == nil { | |||||
| info.status = exportsLoaded | |||||
| } else { | |||||
| info.status = nameLoaded | |||||
| } | |||||
| d.Store(info.dir, info) | |||||
| return info.packageName, info.exports, info.err | |||||
| } | |||||
| @@ -439,7 +439,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw | |||||
| go.mongodb.org/mongo-driver/bson/bsontype | go.mongodb.org/mongo-driver/bson/bsontype | ||||
| go.mongodb.org/mongo-driver/bson/primitive | go.mongodb.org/mongo-driver/bson/primitive | ||||
| go.mongodb.org/mongo-driver/x/bsonx/bsoncore | go.mongodb.org/mongo-driver/x/bsonx/bsoncore | ||||
| # golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f | |||||
| # golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 | |||||
| golang.org/x/crypto/acme | golang.org/x/crypto/acme | ||||
| golang.org/x/crypto/acme/autocert | golang.org/x/crypto/acme/autocert | ||||
| golang.org/x/crypto/argon2 | golang.org/x/crypto/argon2 | ||||
| @@ -508,7 +508,7 @@ golang.org/x/text/transform | |||||
| golang.org/x/text/unicode/bidi | golang.org/x/text/unicode/bidi | ||||
| golang.org/x/text/unicode/norm | golang.org/x/text/unicode/norm | ||||
| golang.org/x/text/width | golang.org/x/text/width | ||||
| # golang.org/x/tools v0.0.0-20190910221609-7f5965fd7709 | |||||
| # golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935 | |||||
| golang.org/x/tools/go/ast/astutil | golang.org/x/tools/go/ast/astutil | ||||
| golang.org/x/tools/go/buildutil | golang.org/x/tools/go/buildutil | ||||
| golang.org/x/tools/go/gcexportdata | golang.org/x/tools/go/gcexportdata | ||||